diff --git a/Project.toml b/Project.toml
index 99d06800..12abda20 100644
--- a/Project.toml
+++ b/Project.toml
@@ -5,8 +5,10 @@ version = "0.12.0"
[deps]
AutoHashEquals = "15f4f7f2-30c1-5605-9d31-71845cf9641f"
+CRC32c = "8bf52ea8-c179-5cab-976a-9e18b702a9bc"
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d"
+Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
diff --git a/deps/build.jl b/deps/build.jl
index caabe43d..57fb3683 100644
--- a/deps/build.jl
+++ b/deps/build.jl
@@ -1,8 +1,8 @@
using PyCall
using Conda
-const cur_version = "1.12.0"
-const cur_py_version = "1.12.0"
+const cur_version = "1.13.1"
+const cur_py_version = "1.12" # Temporarily downgrade Python version until 1.13.1 is released on Conda
############################
diff --git a/examples/diffeq.jl b/examples/diffeq.jl
new file mode 100644
index 00000000..c7309f64
--- /dev/null
+++ b/examples/diffeq.jl
@@ -0,0 +1,10 @@
+using TensorFlow
+using DifferentialEquations
+
+enable_eager_execution()
+
+f(u, p, t) = 1.01 .* u
+u0 = constant(0.5)
+tspan = (0.0, 1.0)
+prob = ODEProblem(f, u0, tspan)
+s = solve(prob)
diff --git a/examples/keras.jl b/examples/keras.jl
new file mode 100644
index 00000000..a6f472c3
--- /dev/null
+++ b/examples/keras.jl
@@ -0,0 +1,13 @@
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+m = tf.Sequential()
+
+tf.add(m, tf.Dense(3, 10))
+tf.add(m, tf.Relu())
+tf.add(m, tf.Dense(10, 3))
+
+x = constant(randn(5, 3))
+y = 3x + 5
+tf.compile(m, optimizer=tf.SGD(lr=1e-4), loss=tf.mse)
+tf.fit(m, x, y, n_epochs=200)
diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl
index fac6ffdf..27b62e23 100644
--- a/src/TensorFlow.jl
+++ b/src/TensorFlow.jl
@@ -124,7 +124,14 @@ Ops,
slice,
import_op,
@tfimport,
-tf_versioninfo
+tf_versioninfo,
+copy_to_device,
+enable_eager_execution,
+EagerTensor,
+summary,
+create_tape,
+set_tape,
+with_tape
using Distributed
@@ -141,8 +148,13 @@ function deallocator(data, len, arg)
end
+include("context.jl")
+
function __init__()
c_deallocator[] = @cfunction(deallocator, Cvoid, (Ptr{Cvoid}, Csize_t, Ptr{Cvoid}))
+ for context in default_context()
+ push!(global_context, context)
+ end
end
function load_python_process(;force_reload=false)
@@ -198,6 +210,7 @@ include("meta.jl")
include("constants.jl")
include("tensorflow_protos.jl")
include("core.jl")
+include("eager.jl")
include("run.jl")
include("version.jl")
include("ops.jl")
@@ -211,5 +224,7 @@ include("summary.jl")
include("deprecated.jl")
include("show.jl")
include("generate_ops.jl")
+include("tape.jl")
+include("keras.jl")
end
diff --git a/src/context.jl b/src/context.jl
new file mode 100644
index 00000000..b1405da9
--- /dev/null
+++ b/src/context.jl
@@ -0,0 +1,46 @@
+abstract type Context
+end
+
+struct ContextStack
+ contexts::Vector{Context}
+end
+
+ContextStack() = ContextStack(Context[])
+
+function Base.push!(stack::ContextStack, context::Context)
+ push!(stack.contexts, context)
+end
+
+function Base.pop!(stack::ContextStack)
+ pop!(stack.contexts)
+end
+
+function default_context()
+ return [ExecutionMode(eager=false)]
+end
+
+function context_value(context_type)
+ return global_context[context_type]
+end
+
+function Base.getindex(c::ContextStack, context_type)
+ value = nothing
+ for context in c.contexts
+ if isa(context, context_type)
+ value = context
+ end
+ end
+ return value
+end
+
+function with_context(block, ctx)
+ push!(global_context, ctx)
+ try
+ return block()
+ finally
+ # Pop the pushed context even if `block` throws; this still assumes `block` leaves the stack balanced.
+ pop!(global_context)
+ end
+end
+
+const global_context = ContextStack()
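
A minimal usage sketch for the context stack above. All names are defined in this diff (`ExecutionMode` comes from src/eager.jl below); the unexported ones are accessed through the module:

    using TensorFlow
    const tf = TensorFlow

    # __init__ seeds the stack with ExecutionMode(eager=false);
    # the most recently pushed context of a given type wins.
    tf.context_value(tf.ExecutionMode).eager   # false

    tf.with_context(tf.ExecutionMode(eager=true)) do
        tf.in_eager_mode()                     # true inside the block
    end
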
diff --git a/src/core.jl b/src/core.jl
index b122e176..b66b4c71 100644
--- a/src/core.jl
+++ b/src/core.jl
@@ -507,20 +507,20 @@ end
mutable struct DeviceList
ptr::Ptr{Cvoid}
count::Int
+end
- function DeviceList(s::Session)
- status = Status()
- ptr = @tfcall(:TF_SessionListDevices, Ptr{Cvoid},
- (Ptr{Cvoid}, Ptr{Cvoid}), s, status)
- check_status(status)
- count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},),
- ptr)
- this = new(ptr, count)
- finalizer(this) do self
- close(self)
- end
- this
+function DeviceList(s::Session)
+ status = Status()
+ ptr = @tfcall(:TF_SessionListDevices, Ptr{Cvoid},
+ (Ptr{Cvoid}, Ptr{Cvoid}), s, status)
+ check_status(status)
+ count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},),
+ ptr)
+ this = DeviceList(ptr, count)
+ finalizer(this) do self
+ close(self)
end
+ this
end
struct DeviceInfo
@@ -663,6 +663,8 @@ RawTensor(data::AbstractArray) = RawTensor(collect(data))
RawTensor(t::RawTensor) = t
+Base.unsafe_convert(::Type{Ptr{Cvoid}}, t::RawTensor) = t.ptr
+
function varint_encode(b::IO, n::Integer)
while n ≥ 2^7
write(b, UInt8(0b10000000 | (n & 0b1111111)))
@@ -803,7 +805,7 @@ function Base.sizeof(t::RawTensor)
@tfcall(:TF_TensorByteSize, Csize_t, (Ptr{Cvoid},), t.ptr) |> Int
end
-function set_device(node_desc, device::String)
+function set_device(node_desc, device)
@tfcall(:TF_SetDevice, Cvoid,
(Ptr{Cvoid}, Cstring),
node_desc.ptr, device)
@@ -1168,7 +1170,10 @@ function load_proto(value::tensorflow.AttrValue)
load_proto(value.list)
elseif has_field(value, :_type)
type_ = value._type
- proto_type_map[type_]
+ get(proto_type_map, type_) do
+ @warn "Unrecognized type. Defaulting to Float32." type_
+ Float32
+ end
end
end
@@ -1218,10 +1223,6 @@ Represents the output of an operation in the computation graph
value_index::Int
end
-get_graph(t::AbstractTensor) = Tensor(t).op.graph
-
-node_name(t::AbstractTensor) = (node_name(Tensor(t).op), Tensor(t).value_index)
-
function Tensor(op::Operation, value_index::Int)
base_tensor = Tensor{Any}(op, value_index)
Tensor{get_output_type(base_tensor)}(op, value_index)
@@ -1242,6 +1243,10 @@ Base.convert(::Type{Tensor{Any}}, value::Tensor{R}) where {R} = value
Base.convert(::Type{Tensor{T}}, value) where {T} = convert(Tensor{T}, constant(value))
+get_graph(t::AbstractTensor) = Tensor(t).op.graph
+
+node_name(t::AbstractTensor) = (node_name(Tensor(t).op), Tensor(t).value_index)
+
function operation_output_type(port::Port)
@tfcall(:TF_OperationOutputType, TF_DataType, (Port,), port)
end
diff --git a/src/eager.jl b/src/eager.jl
new file mode 100644
index 00000000..84f3e5f0
--- /dev/null
+++ b/src/eager.jl
@@ -0,0 +1,336 @@
+@enum PlacementPolicy begin
+ PLACEMENT_EXPLICIT=0
+ PLACEMENT_WARN=1
+ PLACEMENT_SILENT=2
+ PLACEMENT_SILENT_FOR_INT32=3
+end
+
+mutable struct EagerContext <: Context
+ ptr::Ptr{Cvoid}
+end
+
+function EagerContext(;async=false, placement_policy=nothing)
+ # For some reason, `get_all_op_list`
+ # has to be called before :TFE_Execute or else tf
+ # crashes. Maybe something about TF_GetAllOpList is causing the tf
+ # library to enter a bad state.
+ get_all_op_list()
+
+ options = @tfcall(:TFE_NewContextOptions, Ptr{Cvoid}, ())
+ @tfcall(:TFE_ContextOptionsSetAsync, Cvoid, (Ptr{Cvoid}, Cuchar), options, async)
+ if placement_policy !== nothing
+ @tfcall(:TFE_ContextOptionsSetDevicePlacementPolicy, Cvoid, (Ptr{Cvoid}, Int), options, placement_policy)
+ end
+
+ status = Status()
+ context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status)
+ check_status(status)
+ this = EagerContext(context)
+ finalizer(this) do self
+ @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr)
+ end
+ @tfcall(:TFE_DeleteContextOptions, Cvoid, (Ptr{Cvoid},), options)
+ return this
+end
+
+Base.unsafe_convert(::Type{Ptr{Cvoid}}, c::EagerContext) = c.ptr
+
+function DeviceList(ctx::EagerContext)
+ status = Status()
+ ptr = @tfcall(:TFE_ContextListDevices, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), ctx, status)
+ check_status(status)
+ count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},), ptr)
+ this = DeviceList(ptr, count)
+ return this
+end
+
+mutable struct EagerTensor <: AbstractTensor{Any}
+ ptr::Ptr{Cvoid}
+ EagerTensor(ptr::Ptr) = new(ptr)
+end
+
+function EagerTensor(tensor::RawTensor)
+ status = Status()
+ ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status)
+ check_status(status)
+ this = EagerTensor(ptr)
+ finalizer(this) do self
+ @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr)
+ end
+ return this
+end
+
+EagerTensor(value) = EagerTensor(RawTensor(value))
+Base.unsafe_convert(::Type{Ptr{Cvoid}}, h::EagerTensor) = h.ptr
+
+function async_wait(ctx::EagerContext)
+ status = Status()
+ @tfcall(:TFE_ContextAsyncWait, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}), ctx, status)
+ check_status(status)
+end
+
+function device_name(h::EagerTensor)
+ status = Status()
+ c_name = @tfcall(:TFE_TensorHandleDeviceName, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
+ check_status(status)
+ return unsafe_string(c_name)
+end
+
+function backing_device_name(h::EagerTensor)
+ status = Status()
+ c_name = @tfcall(:TFE_TensorHandleBackingDeviceName, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
+ check_status(status)
+ return unsafe_string(c_name)
+end
+
+
+function data_type(h::EagerTensor)
+ return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) |> tf_to_jl_type
+end
+
+Base.eltype(h::EagerTensor) = data_type(h)
+
+function resolve(h::EagerTensor)
+ status = Status()
+ ptr = @tfcall(:TFE_TensorHandleResolve, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
+ check_status(status)
+ tensor = RawTensor(ptr)
+ return tensor
+end
+
+Base.convert(::Type{Array}, h::EagerTensor) = convert(Array, resolve(h))
+
+mutable struct EagerOp
+ ptr::Ptr{Cvoid}
+ op_name::String
+end
+
+function EagerOp(ctx::EagerContext, op_name)
+ status = Status()
+ ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status)
+ check_status(status)
+ this = EagerOp(ptr, String(op_name))
+ finalizer(this) do self
+ @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self)
+ end
+ return this
+end
+
+# Construct in the default eager context (see get_eager_context below).
+EagerOp(op_name) = EagerOp(get_eager_context(), op_name)
+
+Base.unsafe_convert(::Type{Ptr{Cvoid}}, op::EagerOp) = op.ptr
+
+function add_input(op::EagerOp, h::EagerTensor)
+ status = Status()
+ @tfcall(:TFE_OpAddInput, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}), op, h, status)
+ check_status(status)
+ return
+end
+
+function add_input(op::EagerOp, hs::Vector{EagerTensor})
+ for h in hs
+ add_input(op, h)
+ end
+end
+
+function execute(op::EagerOp)
+ op_desc = get_op_def(op.op_name)
+ n_outputs = length(op_desc.output_arg)
+ handles = [EagerTensor(C_NULL) for _ in 1:n_outputs]
+ ptrs = [Ptr{Cvoid}(0) for _ in 1:n_outputs]
+ num_ret = Ref{Cint}(n_outputs)
+ status = Status()
+ @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, ptrs, num_ret, status)
+ check_status(status)
+ for i in 1:n_outputs
+ handles[i].ptr = ptrs[i]
+ end
+ return handles
+end
+
+function Base.setindex!(op::EagerOp, tensor::RawTensor, attr_name)
+ status = Status()
+ @tfcall(:TFE_OpSetAttrTensor, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Ptr{Cvoid}), op, attr_name, tensor, status)
+ check_status(status)
+end
+
+function Base.setindex!(op::EagerOp, dtype::DataType, attr_name)
+ @tfcall(:TFE_OpSetAttrType, Cvoid, (Ptr{Cvoid}, Cstring, TF_DataType), op, attr_name, dtype |> jl_to_df_type)
+end
+
+function Base.setindex!(op::EagerOp, value::Integer, attr_name)
+ value = Int64(value)
+ @tfcall(:TFE_OpSetAttrInt, Cvoid, (Ptr{Cvoid}, Cstring, Int64), op, attr_name, value)
+end
+
+function Base.setindex!(op::EagerOp, value::Bool, attr_name)
+ @tfcall(:TFE_OpSetAttrBool, Cvoid, (Ptr{Cvoid}, Cstring, Cuchar), op, attr_name, value)
+end
+
+function Base.setindex!(op::EagerOp, value::AbstractFloat, attr_name)
+ value = Float32(value)
+ @tfcall(:TFE_OpSetAttrFloat, Cvoid, (Ptr{Cvoid}, Cstring, Cfloat), op, attr_name, value)
+end
+
+function Base.setindex!(op::EagerOp, value::AbstractString, attr_name)
+ value = String(value)
+ @tfcall(:TFE_OpSetAttrString, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Cint), op, attr_name, Vector{UInt8}(value), sizeof(value))
+end
+
+function Base.setindex!(op::EagerOp, value::Vector, attr_name)
+ set_attr_list(op, attr_name, value)
+end
+
+function set_attr_list(op::EagerOp, attr_name, list::Vector{<:Integer})
+ list = Int64.(list)
+ @tfcall(:TFE_OpSetAttrIntList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Int64}, Cint), op, attr_name, list, length(list))
+end
+
+function set_attr_list(op::EagerOp, attr_name, list::Vector{<:AbstractFloat})
+ list = Float32.(list)
+ @tfcall(:TFE_OpSetAttrFloatList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Float32}, Cint), op, attr_name, list, length(list))
+end
+
+function set_attr_list(op::EagerOp, attr_name, list::Vector{<:DataType})
+ list = map(jl_to_df_type, list)
+ @tfcall(:TFE_OpSetAttrTypeList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Cint), op, attr_name, list, length(list))
+end
+
+function set_attr_shape_list(op::EagerOp, attr_name, list::Vector)
+ dims = Vector{Int64}[]
+ for shape in list
+ push!(dims, Int64[shape...])
+ end
+ @tfcall(:TFE_OpSetAttrShapeList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Ptr{Int64}}, Ptr{Cint}, Cint),
+ op,
+ attr_name,
+ dims,
+ Cint[length(x) for x in dims],
+ length(dims))
+end
+
+function clear_caches(ctx::EagerContext)
+ @tfcall(:TFE_ContextClearCaches, Cvoid, (Ptr{Cvoid},), ctx)
+end
+
+function num_dims(h::EagerTensor)
+ status = Status()
+ res = @tfcall(:TFE_TensorHandleNumDims, Cint, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
+ check_status(status)
+ Int(res)
+end
+
+function num_elements(h::EagerTensor)
+ status = Status()
+ res = @tfcall(:TFE_TensorHandleNumElements, Int64, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
+ check_status(status)
+ Int(res)
+end
+
+
+function dim(h::EagerTensor, dim_index)
+ status = Status()
+ res = @tfcall(:TFE_TensorHandleDim, Int64, (Ptr{Cvoid}, Cint, Ptr{Cvoid}), h, dim_index-1, status)
+ check_status(status)
+ Int(res)
+end
+
+function copy_sharing_tensor(h::EagerTensor)
+ status = Status()
+ ptr = @tfcall(:TFE_TensorHandleCopySharingTensor, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
+ check_status(status)
+ return EagerTensor(ptr)
+end
+
+function copy_to_device(ctx::EagerContext, h::EagerTensor, device_name)
+ status = Status()
+ ptr = @tfcall(:TFE_TensorHandleCopyToDevice, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), h, ctx, device_name, status)
+ check_status(status)
+ return EagerTensor(ptr)
+end
+
+copy_to_device(h, device_name) = copy_to_device(get_eager_context(), h, device_name)
+
+function set_device(op::EagerOp, device_name)
+ status = Status()
+ @tfcall(:TFE_OpSetDevice, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), op, device_name, status)
+ check_status(status)
+end
+
+function get_device(op::EagerOp)
+ status = Status()
+ str = @tfcall(:TFE_OpGetDevice, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), op, status)
+ check_status(status)
+ return unsafe_string(str)
+end
+
+
+"""
+ set_xla_compilation(op::EagerOp, enable::Bool)
+
+When `enable` is `true`, and if TensorFlow is built with XLA support, a subsequent
+`execute` call on `op` will run the op via XLA.
+
+If the library is not built with XLA support, this call is a no-op.
+"""
+function set_xla_compilation(op::EagerOp, enable)
+ @tfcall(:TFE_OpSetXLACompilation, Ptr{Cvoid}, (Ptr{Cvoid}, Cuchar), op, enable)
+end
+
+Base.convert(::Type{EagerTensor}, h::EagerTensor) = h
+Base.convert(::Type{EagerTensor}, h) = constant(h)
+
+function item(t::EagerTensor)
+ x = convert(Array, t)
+ if length(x) != 1
+ throw(DimensionMismatch("item can only be called on scalar tensors"))
+ end
+ return x[1]
+end
+
+Base.length(t::EagerTensor) = item(Ops.size(t))
+
+Base.IteratorEltype(::Type{EagerTensor}) = Base.EltypeUnknown() # temp hack
+Base.eltype(::Type{EagerTensor}) = Any
+Base.collect(t::EagerTensor) = Array(t)
+Base.iterate(t::EagerTensor, args...) = iterate(Array(t), args...)
+Base.zero(t::AbstractTensor) = Ops.zeros_like(t)
+Base.ones(t::AbstractTensor) = Ops.ones_like(t)
+
+function Base.:*(t1::EagerTensor, t2::Number)
+ return t1 .* t2
+end
+
+function Base.:*(t1::Number, t2::EagerTensor)
+ return t1 .* t2
+end
+
+function inplace_sub(x, y)
+ i = cast(constant(0:(item(size(x,0))-1)), Int32)
+ Ops.inplace_sub(x, i, y)
+end
+
+struct ExecutionMode <: Context
+ eager::Bool
+end
+
+ExecutionMode(;eager=true) = ExecutionMode(eager)
+
+function enable_eager_execution()
+ push!(global_context, ExecutionMode(eager=true))
+ return nothing
+end
+
+function in_eager_mode()
+ return context_value(ExecutionMode).eager
+end
+
+function get_eager_context()
+ ctx = context_value(EagerContext)
+ if ctx === nothing
+ ctx = EagerContext()
+ push!(global_context, ctx)
+ end
+ return ctx
+end
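
For orientation, a short end-to-end sketch of the eager path this file adds. It assumes, as the examples above do, that `constant` produces an `EagerTensor` once eager mode is on, and that `Ops.add` is among the regenerated kernels in src/ops/imported_ops.jl below:

    using TensorFlow

    enable_eager_execution()

    x = constant([1.0, 2.0, 3.0])   # an EagerTensor handle
    y = Ops.add(x, x)               # executes immediately via TFE_Execute
    convert(Array, y)               # resolves the handle: [2.0, 4.0, 6.0]
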
diff --git a/src/generate_ops.jl b/src/generate_ops.jl
index 0020a48f..6c7856a5 100644
--- a/src/generate_ops.jl
+++ b/src/generate_ops.jl
@@ -4,9 +4,12 @@
######
using MacroTools
+using Dates
struct OpFunc
expr::Expr
+ eager_expr::Expr
+ dispatch_expr::Expr
docstring::String
name::Symbol
end
@@ -18,7 +21,7 @@ If `string` is not allowed as a Julia variable identifier, suffix it with a `_`.
Otherwise, return it unchanged.
"""
function keyword_escape(s)
- keywords = ["const", "type"]
+ keywords = ["const", "type", "while", "for", "if"]
if (s ∈ keywords) || Base.isoperator(Symbol(s))
s = string(s, "_")
end
@@ -41,9 +44,9 @@ function opname_to_jlname(name)
if idx == length(name)
word_end = true
else
- next_char = name[idx+1]
- if idx < length(name)-1
- next_next_char = name[idx+2]
+ next_char = name[idx + 1]
+ if idx < length(name) - 1
+ next_next_char = name[idx + 2]
if isuppercase(cur_char) && isuppercase(next_char) && islowercase(next_next_char)
word_end = true
end
@@ -78,7 +81,7 @@ function to_function(op::tensorflow.OpDef)
inputs = []
input_block = quote end
convert_block = quote end
- type_sets = Dict{String, Vector{Symbol}}()
+ type_sets = Dict{String,Vector{Symbol}}()
for (i, input) in enumerate(op.input_arg)
sym = Symbol("$(input.name)_")
push!(inputs, sym)
@@ -114,14 +117,14 @@ function to_function(op::tensorflow.OpDef)
end
end
if input._type > 0 && haskey(proto_type_map, input._type)
- convert_target = tf.Tensor{proto_type_map[input._type]}
+ convert_target = tf.Tensor{(proto_type_map[input._type])}
end
convert_expr = if isempty(input.number_attr) && isempty(input.type_list_attr) # Scalar input
- :($sym=convert($(convert_target), $sym))
- else # Array argument
+ :($sym = convert($(convert_target), $sym))
+ else # Array argument
# :($sym=convert.($(convert_target), $sym))
- :($sym=[convert($(convert_target), x) for x in $sym])
- end
+ :($sym = [convert($(convert_target), x) for x in $sym])
+ end
push!(convert_block.args, quote
$convert_expr
$diff_expr
@@ -137,6 +140,7 @@ function to_function(op::tensorflow.OpDef)
tf.add_input(desc, $(inputs[input_idx]))
end)
end
+
kwargs = Expr(:parameters)
push!(kwargs.args, Expr(:kw, :name, nothing))
attr_block = quote end
@@ -149,10 +153,10 @@ function to_function(op::tensorflow.OpDef)
m = match(r"list(\(.*\))|(.*)", attr._type)
t = m[1] !== nothing ? m[1] : m[2]
- t_map = Dict("int"=>:(Base.Int),
- "bool"=>:(Base.Bool),
- "tensor"=>:(TensorFlow.RawTensor),
- "string"=>:(Base.String))
+ t_map = Dict("int" => :(Base.Int),
+ "bool" => :(Base.Bool),
+ "tensor" => :(TensorFlow.RawTensor),
+ "string" => :(Base.String))
t_target = get(t_map, t, :(Base.identity))
if m[1] === nothing
source = :($(t_target)($name))
@@ -174,6 +178,18 @@ function to_function(op::tensorflow.OpDef)
end
end)
end
+ t_block = []
+ for (i, input_arg) in enumerate(op.input_arg)
+ if has_field(input_arg, :type_attr)
+ type_attr = input_arg.type_attr
+ if length(type_attr) > 0
+ code = quote
+ desc[$type_attr] = tf.data_type($(inputs[i]))
+ end
+ push!(t_block, code)
+ end
+ end
+ end
pushfirst!(inputs, kwargs)
scalar_output = true
if length(op.output_arg) > 1
@@ -198,8 +214,11 @@ function to_function(op::tensorflow.OpDef)
out
end
end
+ eager_output_block = scalar_output ? :(return res[1]) : :(return res)
+ graph_name = Symbol("$(jl_name)_graph")
+ eager_name = Symbol("$(jl_name)_eager")
expr = quote
- @tf.op function $(jl_name)($(inputs...))
+ function $graph_name($(inputs...))
local desc
tf.with_op_name(name, $(op.name)) do
desc = tf.NodeDescription($(op.name))
@@ -210,6 +229,44 @@ function to_function(op::tensorflow.OpDef)
$output_block
end
end
+
+ eager_convert_block = []
+ for input in inputs[2:end]
+ c = :($input = convert(tf.EagerTensor, $input))
+ push!(eager_convert_block, c)
+ end
+
+ eager_expr = quote
+ function $eager_name($(inputs...))
+ desc = tf.EagerOp($(op.name))
+ # $convert_block
+ $(eager_convert_block...)
+ $input_block
+ $attr_block
+ $(t_block...)
+ res = tf.execute(desc)
+ node = tf.TapeNode($jl_name, [$(inputs[2:end]...)], $(inputs[1].args...), res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ $eager_output_block
+ end
+ end
+ end
+
+ call_kw_params = Expr(:parameters)
+ for arg in inputs[1].args
+ push!(call_kw_params.args, Expr(:kw, arg.args[1], arg.args[1]))
+ end
+ call_args = [call_kw_params; inputs[2:end]]
+ dispatch_expr = quote
+ @tf.op function $jl_name($(inputs...))
+ if tf.in_eager_mode()
+ $(eager_name)($(call_args...))
+ else
+ $(graph_name)($(call_args...))
+ end
+ end
+ end
posargs_str = join((arg.name for arg in op.input_arg), ", ")
kwargs_str = []
for arg in op.attr
@@ -221,6 +278,9 @@ function to_function(op::tensorflow.OpDef)
catch err
default = "?"
end
+ if default === nothing # Not sure why this happens; it does for dropout.
+ default = "?"
+ end
push!(kwargs_str, "$(arg.name)=$default")
end
if isempty(kwargs_str)
@@ -231,10 +291,8 @@ function to_function(op::tensorflow.OpDef)
sig = "$jl_name($(posargs_str)$(kwargs_str))"
doc_str = string(" ", sig, "\n\n",
- escape_string(op.summary)
- ) #TODO Workout how to get descriptions for docstrings
- expr = unblock(MacroTools.flatten(MacroTools.striplines(expr)))
- OpFunc(expr, doc_str, jl_name)
+ escape_string(op.summary)) #TODO Workout how to get descriptions for docstrings
+ OpFunc(expr, eager_expr, dispatch_expr, doc_str, jl_name)
end
"""
@@ -246,11 +304,19 @@ parsed by Julia's parser.
The function is returned with a triple-quoted docstring.
"""
function stringify_func(opfunc::OpFunc)
- s = string(opfunc.expr)
- docstring = replace(opfunc.docstring, "\$", "")
+ expr = quote
+ $(opfunc.expr)
+ $(opfunc.eager_expr)
+ $(opfunc.dispatch_expr)
+ end
+ # MacroTools.flatten seems to have a bug that's causing an invalid expression for 'NoOp'
+ # expr = (MacroTools.flatten(MacroTools.striplines(expr)))
+ expr = MacroTools.striplines(expr)
+
+ s = string(expr)
+ docstring = replace(opfunc.docstring, "\$" => "")
doc_line = "\"\"\"\n$(docstring)\n\"\"\""
- lines = []
- "$doc_line\n$s"
+ "$doc_line\n$s\n"
end
stringify_func(op::tensorflow.OpDef) = stringify_func(to_function(op))
@@ -279,18 +345,19 @@ function import_ops(op_names)
module Ops
import TensorFlow
const tf = TensorFlow
+ import TensorFlow: Tensor
""")
for name in op_names
op = ops[name]
- try
+ # try
f = to_function(op)
s = stringify_func(f)
write(ops_file, s)
print(ops_file, "\n\n")
- catch err
- err_msg = sprint(showerror, err)
- @warn("Could not import operation $name: $err_msg")
- end
+ # catch err
+ # err_msg = sprint(showerror, err)
+ # @warn("Could not import operation $name: $err_msg")
+ # end
end
write(ops_file, """
end
@@ -330,7 +397,7 @@ Returns a reference to a Julia function corresponding to the operation.
function import_op(name)
jl_name = opname_to_jlname(name)
mod = TensorFlow.Ops
- if jl_name ∉ names(mod, all=true)
+ if jl_name ∉ names(mod, all = true)
ops = Dict(get_all_op_list())
op = ops[name]
op_desc = to_function(op)
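
For reference, the OpDef-to-Julia name mangling that `opname_to_jlname` performs; these particular mappings all appear in the regenerated src/ops/imported_ops.jl below:

    opname_to_jlname("ReduceJoin")         # => :reduce_join
    opname_to_jlname("ExtractJpegShape")   # => :extract_jpeg_shape
    opname_to_jlname("NoOp")               # => :no_op
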
diff --git a/src/keras.jl b/src/keras.jl
new file mode 100644
index 00000000..cd1cfd92
--- /dev/null
+++ b/src/keras.jl
@@ -0,0 +1,113 @@
+using Statistics
+
+
+abstract type KerasCallable
+end
+
+abstract type Model <: KerasCallable
+end
+
+abstract type Layer <: KerasCallable
+end
+
+function struct_name(f)
+ @capture(f, struct name_ <: _
+ __
+ end) && return name
+ @capture(f, mutable struct name_ <: _
+ __
+ end) && return name
+ return nothing
+end
+
+# Get around https://github.com/JuliaLang/julia/issues/14919
+macro callable(f)
+ name = struct_name(f)
+ quote
+ $(esc(f))
+ (m::$name)(args...; kwargs...) = forward(m, args...; kwargs...)
+ end
+end
+
+@callable mutable struct Sequential <: Model
+ layers::Vector{Layer}
+ loss # TODO constrain these fields more
+ optimizer
+ trainable::Set
+end
+
+@callable struct Dense <: Layer
+ weights::EagerTensor
+ bias::EagerTensor
+end
+
+function Dense(in_size::Integer, out_size::Integer)
+ layer = Dense(constant(randn(in_size, out_size)), constant(zeros(out_size)))
+ return layer
+end
+
+@callable struct Relu <: Layer
+end
+
+function forward(r::Relu, x)
+ nn.relu(x)
+end
+
+struct SGD
+ lr::EagerTensor
+end
+
+SGD(; lr=1e-3) = SGD(convert(EagerTensor, lr))
+
+Sequential() = Sequential([], nothing, nothing, Set())
+
+function add(m::Sequential, d::Dense)
+ set_trainable(m, d.weights)
+ set_trainable(m, d.bias)
+ push!(m.layers, d)
+end
+
+add(m::Sequential, layer) = push!(m.layers, layer)
+
+forward(d::Dense, x) = Ops.bias_add(x*d.weights, d.bias)
+
+function forward(m::Sequential, x)
+ for layer in m.layers
+ x = forward(layer, x)
+ end
+ return x
+end
+
+mse(y, y_target) = mean((y .- y_target) .^ 2)
+
+function set_trainable(m::Sequential, tensor)
+ push!(m.trainable, tensor)
+end
+
+function compile(m::Sequential; optimizer=nothing, loss=nothing)
+ m.optimizer = optimizer
+ m.loss = loss
+end
+
+optimizer_step(g::SGD, value, grads) = inplace_sub(value, g.lr .* grads)
+
+function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing)
+ optimizer = m.optimizer
+ for epoch in 1:n_epochs
+ tape = create_tape()
+ y_predicted = x
+ for layer in m.layers
+ y_predicted = forward(layer, y_predicted)
+ end
+ loss = m.loss(y, y_predicted)
+ @info "" epoch loss=item(loss)
+ values = collect(m.trainable)
+ grads = grad(tape, loss, values)
+ for (value, g) in zip(values, grads)
+ if g === nothing
+ continue
+ end
+ optimizer_step(optimizer, value, g)
+ end
+ end
+end
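
`fit` above drives the tape machinery in src/tape.jl, which is not part of this diff. A hedged sketch of one manual gradient step, calling `create_tape` and `grad` exactly as `fit` calls them (their signatures are inferred from that call site, not confirmed):

    using TensorFlow, Statistics
    const tf = TensorFlow

    enable_eager_execution()
    w = constant(randn(3, 1))
    x = constant(randn(5, 3))

    tape = create_tape()                # start recording eager ops
    loss = mean((x * w) .^ 2)           # same reduction style as mse above
    grads = tf.grad(tape, loss, [w])    # gradients w.r.t. the listed values
    tf.inplace_sub(w, 0.01 * grads[1])  # one SGD step, cf. optimizer_step
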
diff --git a/src/ops.jl b/src/ops.jl
index bcf630e8..25770101 100644
--- a/src/ops.jl
+++ b/src/ops.jl
@@ -23,7 +23,11 @@ function tf_promote(args...)
if isa(arg, AbstractArray)
push!(new_args, arg)
else
- push!(new_args, convert(Tensor{big_type}, arg))
+ if in_eager_mode()
+ push!(new_args, Ops.cast(arg, DstT = big_type)) # TODO implement promotion
+ else
+ push!(new_args, convert(Tensor{big_type}, arg))
+ end
end
end
(new_args...,)
@@ -32,8 +36,8 @@ end
macro define_binary(jl_func, tf_func)
quote
@op $jl_func(t1::AbstractTensor, t2::AbstractTensor; kwargs...) = $tf_func(tf_promote(t1, t2)...; kwargs...)
- @op $jl_func(t1::AbstractTensor, t2; kwargs...) = $tf_func(t1, Tensor(t2); kwargs...)
- @op $jl_func(t1, t2::AbstractTensor; kwargs...) = $tf_func(Tensor(t1), t2; kwargs...)
+ @op $jl_func(t1::AbstractTensor, t2; kwargs...) = $jl_func(t1, Tensor(t2); kwargs...)
+ @op $jl_func(t1, t2::AbstractTensor; kwargs...) = $jl_func(Tensor(t1), t2; kwargs...)
end |> esc
end
@@ -63,10 +67,10 @@ end
macro define_broadcast(jl_op, tf_func)
quote
Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2::AbstractTensor) = $tf_func(tf_promote(t1, t2)...)
- Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2) = $tf_func(t1, Tensor(t2))
- Base.Broadcast.broadcasted(::typeof($jl_op), t1, t2::AbstractTensor) = $tf_func(Tensor(t1), t2)
- Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2::Base.Broadcast.Broadcasted) = $tf_func(t1, Tensor(collect(t2)))
- Base.Broadcast.broadcasted(::typeof($jl_op), t1::Base.Broadcast.Broadcasted, t2::AbstractTensor) = $tf_func(Tensor(collect(t1)), t2)
+ Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2) = $tf_func(tf_promote(t1, Tensor(t2))...) # TODO don't replicate the tf_promote calls
+ Base.Broadcast.broadcasted(::typeof($jl_op), t1, t2::AbstractTensor) = $tf_func(tf_promote(Tensor(t1), t2)...)
+ Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2::Base.Broadcast.Broadcasted) = $tf_func(tf_promote(t1, Tensor(collect(t2)))...)
+ Base.Broadcast.broadcasted(::typeof($jl_op), t1::Base.Broadcast.Broadcasted, t2::AbstractTensor) = $tf_func(tf_promote(Tensor(collect(t1)), t2)...)
end |> esc
end
@@ -115,7 +119,7 @@ end
capitalize(s::Symbol) = capitalize(string(s))
-function get_name(name="node")
+function get_name(name = "node")
graph = get_def_graph()
name_idx = graph.name_idx
if name == ""
@@ -158,7 +162,7 @@ Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
-@op function placeholder(dtype; name=nothing, shape=nothing)
+@op function placeholder(dtype; name = nothing, shape = nothing)
local node
with_op_name(name, "placeholder") do
graph = get_def_graph()
diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl
index 14b582ef..93fe206b 100644
--- a/src/ops/imported_ops.jl
+++ b/src/ops/imported_ops.jl
@@ -1,3478 +1,110198 @@
-# Autogenerated on 2018-08-22T19:25:49.359
+# Autogenerated on 2019-03-15T20:34:54.858
module Ops
import TensorFlow
-import SpecialFunctions
const tf = TensorFlow
+import TensorFlow: Tensor
"""
- equal(x, y)
+ reduce_join(inputs, reduction_indices; keep_dims=false, separator=)
"""
-tf.@op function equal(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Equal")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Equal")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing)
+ local desc
+ tf.with_op_name(name, "ReduceJoin") do
+ desc = tf.NodeDescription("ReduceJoin")
+ begin
+ begin
+ inputs_ = convert(Tensor{String}, inputs_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ begin
+ if separator !== nothing
+ desc["separator"] = Base.String(separator)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- not_equal(x, y)
-
-
-"""
-tf.@op function not_equal(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("NotEqual")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "NotEqual")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing)
+ desc = tf.EagerOp("ReduceJoin")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ begin
+ if separator !== nothing
+ desc["separator"] = Base.String(separator)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- less_equal(x, y)
-
-
-"""
-tf.@op function less_equal(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LessEqual")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "LessEqual")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing)
+ if tf.in_eager_mode()
+ reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator)
+ else
+ reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator)
+ end
+ end
end
+end
-"""
- greater(x, y)
-
-
-"""
-tf.@op function greater(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Greater")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Greater")
- tf.Tensor(tf.Operation(desc))
- end
"""
- greater_equal(x, y)
+ reduce_dataset(input_dataset, initial_state, other_arguments; use_inter_op_parallelism=true)
"""
-tf.@op function greater_equal(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("GreaterEqual")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "GreaterEqual")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing)
+ local desc
+ tf.with_op_name(name, "ReduceDataset") do
+ desc = tf.NodeDescription("ReduceDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_]
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, initial_state_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Tstate !== nothing
+ desc["Tstate"] = map(Base.identity, Tstate)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- less(x, y)
-
-
-"""
-tf.@op function less(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Less")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Less")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing)
+ desc = tf.EagerOp("ReduceDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ initial_state_ = convert(tf.EagerTensor, initial_state_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, initial_state_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Tstate !== nothing
+ desc["Tstate"] = map(Base.identity, Tstate)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing)
+ if tf.in_eager_mode()
+ reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism)
+ else
+ reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism)
+ end
+ end
+ end
+end
+
"""
- no_op()
+ tensor_list_from_tensor(tensor, element_shape)
"""
-tf.@op function no_op(; name=nothing)
- local desc
- tf.with_op_name((()->desc = tf.NodeDescription("NoOp")), name, "NoOp")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListFromTensor") do
+ desc = tf.NodeDescription("TensorListFromTensor")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ element_shape_ = convert(Tensor{Any}, element_shape_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ begin
+ (element_shape_,) = tf.tf_promote(element_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ desc = tf.EagerOp("TensorListFromTensor")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ element_shape_ = convert(tf.EagerTensor, element_shape_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ begin
+ desc["element_dtype"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["shape_type"] = tf.data_type(element_shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ if tf.in_eager_mode()
+ tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ else
+ tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ end
+ end
+ end
+end
+
"""
- count_up_to(ref)
+ extract_jpeg_shape(contents; output_type=Int32)
"""
-tf.@op function count_up_to(ref_; name=nothing, limit=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("CountUpTo")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- (ref_,) = tf.tf_promote(ref_)
- tf.add_input(desc, ref_)
- if limit !== nothing
- desc["limit"] = Base.Int(limit)
+begin
+ begin
+ function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing)
+ local desc
+ tf.with_op_name(name, "ExtractJpegShape") do
+ desc = tf.NodeDescription("ExtractJpegShape")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
end
- end), name, "CountUpTo")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ if output_type !== nothing
+ desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing)
+ desc = tf.EagerOp("ExtractJpegShape")
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if output_type !== nothing
+ desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_jpeg_shape(contents_; name=nothing, output_type=nothing)
+ if tf.in_eager_mode()
+ extract_jpeg_shape_eager(contents_; name=name, output_type=output_type)
+ else
+ extract_jpeg_shape_graph(contents_; name=name, output_type=output_type)
+ end
+ end
+ end
+end
+
"""
- decode_gif(contents)
+ svd(input; compute_uv=true, full_matrices=false)
"""
-tf.@op function decode_gif(contents_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("DecodeGif")
- contents_ = convert(TensorFlow.Tensor{String}, contents_)
- tf.add_input(desc, contents_)
- end), name, "DecodeGif")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+ local desc
+ tf.with_op_name(name, "Svd") do
+ desc = tf.NodeDescription("Svd")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_uv !== nothing
+ desc["compute_uv"] = Base.Bool(compute_uv)
+ end
+ end
+ begin
+ if full_matrices !== nothing
+ desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+ desc = tf.EagerOp("Svd")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_uv !== nothing
+ desc["compute_uv"] = Base.Bool(compute_uv)
+ end
+ end
+ begin
+ if full_matrices !== nothing
+ desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+ if tf.in_eager_mode()
+ svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices)
+ else
+ svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices)
+ end
+ end
end
+end
+
"""
- decode_jpeg(contents; channels=0, ratio=1, fancy_upscaling=true, try_recover_truncated=false, acceptable_fraction=nothing, dct_method=)
+ iterator_get_next_sync(iterator)
"""
-tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("DecodeJpeg")
- contents_ = convert(TensorFlow.Tensor{String}, contents_)
- tf.add_input(desc, contents_)
- if channels !== nothing
- desc["channels"] = Base.Int(channels)
+begin
+ begin
+ function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorGetNextSync") do
+ desc = tf.NodeDescription("IteratorGetNextSync")
+ begin
+ begin
+ iterator_ = convert(Tensor{Any}, iterator_)
+ begin
+ end
end
- if ratio !== nothing
- desc["ratio"] = Base.Int(ratio)
+ end
+ begin
+ begin
+ tf.add_input(desc, iterator_)
end
- if fancy_upscaling !== nothing
- desc["fancy_upscaling"] = Base.Bool(fancy_upscaling)
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
end
- if try_recover_truncated !== nothing
- desc["try_recover_truncated"] = Base.Bool(try_recover_truncated)
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
end
- if acceptable_fraction !== nothing
- desc["acceptable_fraction"] = Base.identity(acceptable_fraction)
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("IteratorGetNextSync")
+ iterator_ = convert(tf.EagerTensor, iterator_)
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
end
- if dct_method !== nothing
- desc["dct_method"] = Base.String(dct_method)
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
end
- end), name, "DecodeJpeg")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
"""
- encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=in, x_density=300, y_density=300, xmp_metadata=)
+ ref_enter(data; is_constant=false, parallel_iterations=10)
"""
-tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("EncodeJpeg")
- image_ = convert(TensorFlow.Tensor{UInt8}, image_)
- tf.add_input(desc, image_)
- if format !== nothing
- desc["format"] = Base.String(format)
+begin
+ begin
+ function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+ local desc
+ tf.with_op_name(name, "RefEnter") do
+ desc = tf.NodeDescription("RefEnter")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
end
- if quality !== nothing
- desc["quality"] = Base.Int(quality)
+ begin
+ (data_,) = tf.tf_promote(data_)
end
- if progressive !== nothing
- desc["progressive"] = Base.Bool(progressive)
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
end
- if optimize_size !== nothing
- desc["optimize_size"] = Base.Bool(optimize_size)
+ end
+ begin
+ begin
+ if frame_name !== nothing
+ desc["frame_name"] = Base.String(frame_name)
+ end
end
- if chroma_downsampling !== nothing
- desc["chroma_downsampling"] = Base.Bool(chroma_downsampling)
+ begin
+ if is_constant !== nothing
+ desc["is_constant"] = Base.Bool(is_constant)
+ end
end
- if density_unit !== nothing
- desc["density_unit"] = Base.String(density_unit)
+ begin
+ if parallel_iterations !== nothing
+ desc["parallel_iterations"] = Base.Int(parallel_iterations)
+ end
end
- if x_density !== nothing
- desc["x_density"] = Base.Int(x_density)
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+ desc = tf.EagerOp("RefEnter")
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if frame_name !== nothing
+ desc["frame_name"] = Base.String(frame_name)
end
- if y_density !== nothing
- desc["y_density"] = Base.Int(y_density)
+ end
+ begin
+ if is_constant !== nothing
+ desc["is_constant"] = Base.Bool(is_constant)
end
- if xmp_metadata !== nothing
- desc["xmp_metadata"] = Base.String(xmp_metadata)
+ end
+ begin
+ if parallel_iterations !== nothing
+ desc["parallel_iterations"] = Base.Int(parallel_iterations)
end
- end), name, "EncodeJpeg")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+ if tf.in_eager_mode()
+ ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+ else
+ ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+ end
+ end
+ end
+end
+
"""
- encode_png(image; compression=-1)
+ erf(x)
"""
-tf.@op function encode_png(image_; name=nothing, compression=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("EncodePng")
- image_ = convert(TensorFlow.Tensor{UInt8}, image_)
- (image_,) = tf.tf_promote(image_)
- tf.add_input(desc, image_)
- if compression !== nothing
- desc["compression"] = Base.Int(compression)
+begin
+ begin
+ function erf_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Erf") do
+ desc = tf.NodeDescription("Erf")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
end
- end), name, "EncodePng")
- tf.Tensor(tf.Operation(desc))
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function erf_eager(x_; name=nothing)
+ desc = tf.EagerOp("Erf")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(erf, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erf(x_; name=nothing)
+ if tf.in_eager_mode()
+ erf_eager(x_; name=name)
+ else
+ erf_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
- resize_area(images, size; align_corners=false)
+ lookup_table_export_v2(table_handle)
"""
-tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ResizeArea")
- images_ = convert(TensorFlow.Tensor{Any}, images_)
- size_ = convert(TensorFlow.Tensor{Int32}, size_)
- (images_,) = tf.tf_promote(images_)
- tf.add_input(desc, images_)
- tf.add_input(desc, size_)
- if align_corners !== nothing
- desc["align_corners"] = Base.Bool(align_corners)
+begin
+ begin
+ function lookup_table_export_v2_graph(table_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableExportV2") do
+ desc = tf.NodeDescription("LookupTableExportV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
end
- end), name, "ResizeArea")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
end
+ begin
+ function lookup_table_export_v2_eager(table_handle_; name=nothing)
+ desc = tf.EagerOp("LookupTableExportV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export_v2(table_handle_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_export_v2_eager(table_handle_; name=name)
+ else
+ lookup_table_export_v2_graph(table_handle_; name=name)
+ end
+ end
+ end
+end
+
"""
- resize_bicubic(images, size; align_corners=false)
+ round(x)
"""
-tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ResizeBicubic")
- images_ = convert(TensorFlow.Tensor{Any}, images_)
- size_ = convert(TensorFlow.Tensor{Int32}, size_)
- (images_,) = tf.tf_promote(images_)
- tf.add_input(desc, images_)
- tf.add_input(desc, size_)
- if align_corners !== nothing
- desc["align_corners"] = Base.Bool(align_corners)
+begin
+ begin
+ function round_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Round") do
+ desc = tf.NodeDescription("Round")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
end
- end), name, "ResizeBicubic")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function round_eager(x_; name=nothing)
+ desc = tf.EagerOp("Round")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(round, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function round(x_; name=nothing)
+ if tf.in_eager_mode()
+ round_eager(x_; name=name)
+ else
+ round_graph(x_; name=name)
+ end
+ end
end
+end
-"""
- resize_bilinear(images, size; align_corners=false)
+"""
+ outfeed_dequeue(; device_ordinal=-1)
+Retrieves a single tensor from the computation outfeed. This operation will
"""
-tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ResizeBilinear")
- images_ = convert(TensorFlow.Tensor{Any}, images_)
- size_ = convert(TensorFlow.Tensor{Int32}, size_)
- (images_,) = tf.tf_promote(images_)
- tf.add_input(desc, images_)
- tf.add_input(desc, size_)
- if align_corners !== nothing
- desc["align_corners"] = Base.Bool(align_corners)
+begin
+ begin
+ function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "OutfeedDequeue") do
+ desc = tf.NodeDescription("OutfeedDequeue")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function outfeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("OutfeedDequeue")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
end
- end), name, "ResizeBilinear")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal)
+ else
+ outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
"""
- resize_nearest_neighbor(images, size; align_corners=false)
+ tensor_forest_tree_is_initialized_op(tree_handle)
"""
-tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ResizeNearestNeighbor")
- images_ = convert(TensorFlow.Tensor{Any}, images_)
- size_ = convert(TensorFlow.Tensor{Int32}, size_)
- (images_,) = tf.tf_promote(images_)
- tf.add_input(desc, images_)
- tf.add_input(desc, size_)
- if align_corners !== nothing
- desc["align_corners"] = Base.Bool(align_corners)
+begin
+ begin
+ function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do
+ desc = tf.NodeDescription("TensorForestTreeIsInitializedOp")
+ begin
+ begin
+ tree_handle_ = convert(Tensor{Any}, tree_handle_)
+ begin
+ end
end
- end), name, "ResizeNearestNeighbor")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
+ begin
+ function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing)
+ desc = tf.EagerOp("TensorForestTreeIsInitializedOp")
+ tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name)
+ else
+ tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name)
+ end
+ end
+ end
+end
+
"""
- extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true)
+ merge(inputs)
"""
-tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ExtractGlimpse")
- input_ = convert(TensorFlow.Tensor{Float32}, input_)
- size_ = convert(TensorFlow.Tensor{Int32}, size_)
- offsets_ = convert(TensorFlow.Tensor{Float32}, offsets_)
- tf.add_input(desc, input_)
- tf.add_input(desc, size_)
- tf.add_input(desc, offsets_)
- if centered !== nothing
- desc["centered"] = Base.Bool(centered)
+begin
+ begin
+ function merge_graph(inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "Merge") do
+ desc = tf.NodeDescription("Merge")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
end
- if normalized !== nothing
- desc["normalized"] = Base.Bool(normalized)
+ begin
+ (inputs_,) = tf.tf_promote(inputs_)
end
- if uniform_noise !== nothing
- desc["uniform_noise"] = Base.Bool(uniform_noise)
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
end
- end), name, "ExtractGlimpse")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function merge_eager(inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("Merge")
+        inputs_ = [convert(tf.EagerTensor, x) for x = inputs_]  # `inputs` is a list input, so convert element-wise
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(inputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge(inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ merge_eager(inputs_; name=name, N=N)
+ else
+ merge_graph(inputs_; name=name, N=N)
+ end
+ end
+ end
+end
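+
+# NOTE (sketch): `Merge` is a multi-output op, so `merge_graph` collects both
+# outputs into a `Vector{Tensor}` and `merge_eager` returns the full result
+# rather than `res[1]`. Assuming hypothetical input tensors `t1` and `t2`:
+#
+#     value, value_index = merge([t1, t2])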
+
"""
- crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=nothing)
+ histogram_fixed_width(values, value_range, nbins; dtype=Int32)
"""
-tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("CropAndResize")
- image_ = convert(TensorFlow.Tensor{Any}, image_)
- boxes_ = convert(TensorFlow.Tensor{Float32}, boxes_)
- box_ind_ = convert(TensorFlow.Tensor{Int32}, box_ind_)
- crop_size_ = convert(TensorFlow.Tensor{Int32}, crop_size_)
- (image_,) = tf.tf_promote(image_)
- tf.add_input(desc, image_)
- tf.add_input(desc, boxes_)
- tf.add_input(desc, box_ind_)
- tf.add_input(desc, crop_size_)
- if method !== nothing
- desc["method"] = Base.String(method)
+begin
+ begin
+ function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "HistogramFixedWidth") do
+ desc = tf.NodeDescription("HistogramFixedWidth")
+ begin
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
end
- if extrapolation_value !== nothing
- desc["extrapolation_value"] = Base.identity(extrapolation_value)
+ begin
+ value_range_ = convert(Tensor{Any}, value_range_)
+ begin
+ end
+ end
+ begin
+ nbins_ = convert(Tensor{Int32}, nbins_)
+ begin
+ end
+ end
+ begin
+ (values_, value_range_) = tf.tf_promote(values_, value_range_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, value_range_)
+ end
+ begin
+ tf.add_input(desc, nbins_)
end
- end), name, "CropAndResize")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("HistogramFixedWidth")
+ values_ = convert(tf.EagerTensor, values_)
+ value_range_ = convert(tf.EagerTensor, value_range_)
+ nbins_ = convert(tf.EagerTensor, nbins_)
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, value_range_)
+ end
+ begin
+ tf.add_input(desc, nbins_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_range_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype)
+ else
+ histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
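+
+# Example (hedged sketch): `histogram_fixed_width` promotes `values` and
+# `value_range` to a shared dtype before binning; `samples` below is hypothetical:
+#
+#     counts = histogram_fixed_width(samples, [0.0, 1.0], 10)   # 10 bins over [0, 1]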
+
"""
- adjust_hue(images, delta)
+ asin(x)
"""
-tf.@op function adjust_hue(images_, delta_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AdjustHue")
- images_ = convert(TensorFlow.Tensor{Float32}, images_)
- delta_ = convert(TensorFlow.Tensor{Float32}, delta_)
- tf.add_input(desc, images_)
- tf.add_input(desc, delta_)
- end), name, "AdjustHue")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function asin_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Asin") do
+ desc = tf.NodeDescription("Asin")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function asin_eager(x_; name=nothing)
+ desc = tf.EagerOp("Asin")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(asin, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asin(x_; name=nothing)
+ if tf.in_eager_mode()
+ asin_eager(x_; name=name)
+ else
+ asin_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
- adjust_saturation(images, scale)
+ any(input, reduction_indices; keep_dims=false)
"""
-tf.@op function adjust_saturation(images_, scale_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AdjustSaturation")
- images_ = convert(TensorFlow.Tensor{Float32}, images_)
- scale_ = convert(TensorFlow.Tensor{Float32}, scale_)
- tf.add_input(desc, images_)
- tf.add_input(desc, scale_)
- end), name, "AdjustSaturation")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Any") do
+ desc = tf.NodeDescription("Any")
+ begin
+ begin
+ input_ = convert(Tensor{Bool}, input_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
+ end
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("Any")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function any(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ any_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
end
+end
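+
+# NOTE (sketch): the graph builder above subtracts 1 from `reduction_indices`,
+# so callers pass Julia-style 1-based axes while the underlying `Any` kernel
+# receives the 0-based axes TensorFlow expects:
+#
+#     x = TensorFlow.constant([true false; false false])
+#     any(x, 2)   # reduce over the second (1-based) dimension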
+
"""
- draw_bounding_boxes(images, boxes)
+ rsqrt_grad(y, dy)
"""
-tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("DrawBoundingBoxes")
- images_ = convert(TensorFlow.Tensor{Float32}, images_)
- boxes_ = convert(TensorFlow.Tensor{Float32}, boxes_)
- (images_,) = tf.tf_promote(images_)
- tf.add_input(desc, images_)
- tf.add_input(desc, boxes_)
- end), name, "DrawBoundingBoxes")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function rsqrt_grad_graph(y_, dy_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RsqrtGrad") do
+ desc = tf.NodeDescription("RsqrtGrad")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (y_, dy_) = tf.tf_promote(y_, dy_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rsqrt_grad_eager(y_, dy_; name=nothing)
+ desc = tf.EagerOp("RsqrtGrad")
+ y_ = convert(tf.EagerTensor, y_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt_grad(y_, dy_; name=nothing)
+ if tf.in_eager_mode()
+ rsqrt_grad_eager(y_, dy_; name=name)
+ else
+ rsqrt_grad_graph(y_, dy_; name=name)
+ end
+ end
+ end
+end
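+
+# NOTE (sketch): in eager mode each executor, like `rsqrt_grad_eager` above,
+# records a `tf.TapeNode` against its first output via `tf.add_node`; the
+# gradient tape introduced in this PR can later walk those recorded inputs
+# (here `[y_, dy_]`) when computing derivatives.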
+
"""
- non_max_suppression(boxes, scores, max_output_size; iou_threshold=nothing)
+ tensor_array_scatter(handle, indices, value, flow_in)
"""
-tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("NonMaxSuppression")
- boxes_ = convert(TensorFlow.Tensor{Float32}, boxes_)
- scores_ = convert(TensorFlow.Tensor{Float32}, scores_)
- max_output_size_ = convert(TensorFlow.Tensor{Int32}, max_output_size_)
- tf.add_input(desc, boxes_)
- tf.add_input(desc, scores_)
- tf.add_input(desc, max_output_size_)
- if iou_threshold !== nothing
- desc["iou_threshold"] = Base.identity(iou_threshold)
+begin
+ begin
+ function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayScatter") do
+ desc = tf.NodeDescription("TensorArrayScatter")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
end
- end), name, "NonMaxSuppression")
- tf.Tensor(tf.Operation(desc))
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayScatter")
+ handle_ = convert(tf.EagerTensor, handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name)
+ else
+ tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
"""
- sample_distorted_bounding_box(image_size, bounding_boxes; seed=0, seed2=0, min_object_covered=nothing, aspect_ratio_range=Int64[], area_range=Int64[], max_attempts=100, use_image_if_no_bounding_boxes=false)
+ dynamic_partition(data, partitions)
"""
-tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SampleDistortedBoundingBox")
- image_size_ = convert(TensorFlow.Tensor{Any}, image_size_)
- bounding_boxes_ = convert(TensorFlow.Tensor{Float32}, bounding_boxes_)
- (image_size_,) = tf.tf_promote(image_size_)
- tf.add_input(desc, image_size_)
- tf.add_input(desc, bounding_boxes_)
- if seed !== nothing
- desc["seed"] = Base.Int(seed)
+begin
+ begin
+ function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing)
+ local desc
+ tf.with_op_name(name, "DynamicPartition") do
+ desc = tf.NodeDescription("DynamicPartition")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
end
- if seed2 !== nothing
- desc["seed2"] = Base.Int(seed2)
+ begin
+ partitions_ = convert(Tensor{Int32}, partitions_)
+ begin
+ end
end
- if min_object_covered !== nothing
- desc["min_object_covered"] = Base.identity(min_object_covered)
+ begin
+ (data_,) = tf.tf_promote(data_)
end
- if aspect_ratio_range !== nothing
- desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range)
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
end
- if area_range !== nothing
- desc["area_range"] = map(Base.identity, area_range)
+ begin
+ tf.add_input(desc, partitions_)
end
- if max_attempts !== nothing
- desc["max_attempts"] = Base.Int(max_attempts)
+ end
+ begin
+ begin
+ if num_partitions !== nothing
+ desc["num_partitions"] = Base.Int(num_partitions)
+ end
end
- if use_image_if_no_bounding_boxes !== nothing
- desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes)
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_partitions
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing)
+ desc = tf.EagerOp("DynamicPartition")
+ data_ = convert(tf.EagerTensor, data_)
+ partitions_ = convert(tf.EagerTensor, partitions_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, partitions_)
+ end
+ end
+ begin
+ begin
+ if num_partitions !== nothing
+ desc["num_partitions"] = Base.Int(num_partitions)
end
- end), name, "SampleDistortedBoundingBox")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:3
- push!(out, tf.Tensor(op, out_idx))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing)
+ if tf.in_eager_mode()
+ dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions)
+ else
+ dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions)
+ end
+ end
+ end
+end
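+
+# NOTE (sketch): `dynamic_partition` produces `num_partitions` outputs, which is
+# why the graph builder loops over `1:num_partitions` when collecting results.
+# With a hypothetical `data` and 0/1/2-valued `partitions`:
+#
+#     parts = dynamic_partition(data, partitions; num_partitions=3)   # Vector of 3 tensors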
+
"""
- logical_and(x, y)
+ experimental_private_thread_pool_dataset(input_dataset, num_threads)
"""
-tf.@op function logical_and(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LogicalAnd")
- x_ = convert(TensorFlow.Tensor{Bool}, x_)
- y_ = convert(TensorFlow.Tensor{Bool}, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "LogicalAnd")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do
+ desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ num_threads_ = convert(Tensor{Int64}, num_threads_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, num_threads_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ num_threads_ = convert(tf.EagerTensor, num_threads_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, num_threads_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
end
+end
+
"""
- logical_not(x)
+ reader_serialize_state(reader_handle)
"""
-tf.@op function logical_not(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LogicalNot")
- x_ = convert(TensorFlow.Tensor{Bool}, x_)
- tf.add_input(desc, x_)
- end), name, "LogicalNot")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function reader_serialize_state_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderSerializeState") do
+ desc = tf.NodeDescription("ReaderSerializeState")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_serialize_state_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderSerializeState")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_serialize_state_eager(reader_handle_; name=name)
+ else
+ reader_serialize_state_graph(reader_handle_; name=name)
+ end
+ end
end
+end
+
"""
- logical_or(x, y)
+ right_shift(x, y)
"""
-tf.@op function logical_or(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LogicalOr")
- x_ = convert(TensorFlow.Tensor{Bool}, x_)
- y_ = convert(TensorFlow.Tensor{Bool}, y_)
+begin
+ begin
+ function right_shift_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RightShift") do
+ desc = tf.NodeDescription("RightShift")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function right_shift_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("RightShift")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
tf.add_input(desc, x_)
+ end
+ begin
tf.add_input(desc, y_)
- end), name, "LogicalOr")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(right_shift, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function right_shift(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ right_shift_eager(x_, y_; name=name)
+ else
+ right_shift_graph(x_, y_; name=name)
+ end
+ end
end
+end
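+
+# Example (hedged sketch): `right_shift` promotes both operands to a common
+# integer dtype, mirroring `Base.>>` elementwise:
+#
+#     right_shift(constant([8, 16]), constant([1, 2]))   # == [4, 4]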
+
"""
- add_n(inputs)
+    avg_pool3d(input; data_format="NDHWC")
"""
-tf.@op function add_n(inputs_; name=nothing, N=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AddN")
- inputs_ = [convert(TensorFlow.Tensor{Any}, x) for x = inputs_]
- (inputs_,) = tf.tf_promote(inputs_)
- tf.add_input(desc, inputs_)
- if N !== nothing
- desc["N"] = Base.Int(N)
+begin
+ begin
+ function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "AvgPool3D") do
+ desc = tf.NodeDescription("AvgPool3D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
end
- end), name, "AddN")
- tf.Tensor(tf.Operation(desc))
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- arg_min(input, dimension; output_type=Int64)
-
-
-"""
-tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ArgMin")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- dimension_ = convert(TensorFlow.Tensor{Int32}, dimension_)
- dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1)
- (input_,) = tf.tf_promote(input_)
- (dimension_,) = tf.tf_promote(dimension_)
+ begin
+ function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("AvgPool3D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
tf.add_input(desc, input_)
- tf.add_input(desc, dimension_)
- if output_type !== nothing
- desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
end
- end), name, "ArgMin")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
"""
- arg_max(input, dimension; output_type=Int64)
+ encode_png(image; compression=-1)
"""
-tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ArgMax")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- dimension_ = convert(TensorFlow.Tensor{Int32}, dimension_)
- dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1)
- (input_,) = tf.tf_promote(input_)
- (dimension_,) = tf.tf_promote(dimension_)
- tf.add_input(desc, input_)
- tf.add_input(desc, dimension_)
- if output_type !== nothing
- desc["output_type"] = Base.identity(output_type)
+begin
+ begin
+ function encode_png_graph(image_; name=nothing, compression=nothing)
+ local desc
+ tf.with_op_name(name, "EncodePng") do
+ desc = tf.NodeDescription("EncodePng")
+ begin
+ begin
+ image_ = convert(Tensor{UInt8}, image_)
+ begin
+ end
+ end
+ begin
+ (image_,) = tf.tf_promote(image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ end
+ begin
+ begin
+ if compression !== nothing
+ desc["compression"] = Base.Int(compression)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function encode_png_eager(image_; name=nothing, compression=nothing)
+ desc = tf.EagerOp("EncodePng")
+ image_ = convert(tf.EagerTensor, image_)
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ end
+ begin
+ begin
+ if compression !== nothing
+ desc["compression"] = Base.Int(compression)
end
- end), name, "ArgMax")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_png(image_; name=nothing, compression=nothing)
+ if tf.in_eager_mode()
+ encode_png_eager(image_; name=name, compression=compression)
+ else
+ encode_png_graph(image_; name=name, compression=compression)
+ end
+ end
end
+end
-"""
- add(x, y)
+"""
+    debug_identity(input; device_name="", tensor_name="", debug_urls=Int64[], gated_grpc=false)
+Debug Identity Op.
"""
-tf.@op function add(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Add")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Add")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+ local desc
+ tf.with_op_name(name, "DebugIdentity") do
+ desc = tf.NodeDescription("DebugIdentity")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+ desc = tf.EagerOp("DebugIdentity")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+ if tf.in_eager_mode()
+ debug_identity_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc)
+ else
+ debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc)
+ end
+ end
end
+end
+
"""
- sub(x, y)
+ imag(input)
"""
-tf.@op function sub(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Sub")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Sub")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function imag_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Imag") do
+ desc = tf.NodeDescription("Imag")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function imag_eager(input_; name=nothing)
+ desc = tf.EagerOp("Imag")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(imag, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function imag(input_; name=nothing)
+ if tf.in_eager_mode()
+ imag_eager(input_; name=name)
+ else
+ imag_graph(input_; name=name)
+ end
+ end
end
+end
+
"""
- mat_mul(a, b; transpose_a=false, transpose_b=false)
+ resource_sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false)
"""
-tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatMul")
- a_ = convert(TensorFlow.Tensor{Any}, a_)
- b_ = convert(TensorFlow.Tensor{Any}, b_)
- (a_, b_) = tf.tf_promote(a_, b_)
- tf.add_input(desc, a_)
- tf.add_input(desc, b_)
- if transpose_a !== nothing
- desc["transpose_a"] = Base.Bool(transpose_a)
+begin
+ begin
+ function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do
+ desc = tf.NodeDescription("ResourceSparseApplyFtrlV2")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
end
- if transpose_b !== nothing
- desc["transpose_b"] = Base.Bool(transpose_b)
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
end
- end), name, "MatMul")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyFtrlV2")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_shrinkage_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
end
+end
+
"""
- mul(x, y)
+    stage_clear(; capacity=0, memory_limit=0, container="", shared_name="")
"""
-tf.@op function mul(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Mul")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Mul")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "StageClear") do
+ desc = tf.NodeDescription("StageClear")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stage_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("StageClear")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ stage_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ stage_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
"""
- pow(x, y)
+ sign(x)
"""
-tf.@op function pow(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Pow")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
+begin
+ begin
+ function sign_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Sign") do
+ desc = tf.NodeDescription("Sign")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sign_eager(x_; name=nothing)
+ desc = tf.EagerOp("Sign")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Pow")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sign, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sign(x_; name=nothing)
+ if tf.in_eager_mode()
+ sign_eager(x_; name=name)
+ else
+ sign_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
- matrix_solve(matrix, rhs; adjoint=false)
+ population_count(x)
"""
-tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatrixSolve")
- matrix_ = convert(TensorFlow.Tensor{Any}, matrix_)
- rhs_ = convert(TensorFlow.Tensor{Any}, rhs_)
- (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
- tf.add_input(desc, matrix_)
- tf.add_input(desc, rhs_)
- if adjoint !== nothing
- desc["adjoint"] = Base.Bool(adjoint)
+begin
+ begin
+ function population_count_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "PopulationCount") do
+ desc = tf.NodeDescription("PopulationCount")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
end
- end), name, "MatrixSolve")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function population_count_eager(x_; name=nothing)
+ desc = tf.EagerOp("PopulationCount")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(population_count, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function population_count(x_; name=nothing)
+ if tf.in_eager_mode()
+ population_count_eager(x_; name=name)
+ else
+ population_count_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
- matrix_triangular_solve(matrix, rhs; lower=true, adjoint=false)
+ neg(x)
"""
-tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatrixTriangularSolve")
- matrix_ = convert(TensorFlow.Tensor{Any}, matrix_)
- rhs_ = convert(TensorFlow.Tensor{Any}, rhs_)
- (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
- tf.add_input(desc, matrix_)
- tf.add_input(desc, rhs_)
- if lower !== nothing
- desc["lower"] = Base.Bool(lower)
+begin
+ begin
+ function neg_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Neg") do
+ desc = tf.NodeDescription("Neg")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
end
- if adjoint !== nothing
- desc["adjoint"] = Base.Bool(adjoint)
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
end
- end), name, "MatrixTriangularSolve")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function neg_eager(x_; name=nothing)
+ desc = tf.EagerOp("Neg")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(neg, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg(x_; name=nothing)
+ if tf.in_eager_mode()
+ neg_eager(x_; name=name)
+ else
+ neg_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
- matrix_solve_ls(matrix, rhs, l2_regularizer; fast=true)
+ anonymous_iterator()
"""
-tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatrixSolveLs")
- matrix_ = convert(TensorFlow.Tensor{Any}, matrix_)
- rhs_ = convert(TensorFlow.Tensor{Any}, rhs_)
- l2_regularizer_ = convert(TensorFlow.Tensor{Float64}, l2_regularizer_)
- (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
- tf.add_input(desc, matrix_)
- tf.add_input(desc, rhs_)
- tf.add_input(desc, l2_regularizer_)
- if fast !== nothing
- desc["fast"] = Base.Bool(fast)
+begin
+ begin
+ function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "AnonymousIterator") do
+ desc = tf.NodeDescription("AnonymousIterator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function anonymous_iterator_eager(; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("AnonymousIterator")
+ begin
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
end
- end), name, "MatrixSolveLs")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ anonymous_iterator_eager(; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ anonymous_iterator_graph(; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
end
+end
+
"""
- cholesky(input)
+ sparse_reduce_sum(input_indices, input_values, input_shape, reduction_axes; keep_dims=false)
"""
-tf.@op function cholesky(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Cholesky")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- end), name, "Cholesky")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "SparseReduceSum") do
+ desc = tf.NodeDescription("SparseReduceSum")
+ begin
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_values_ = convert(Tensor{Any}, input_values_)
+ begin
+ end
+ end
+ begin
+ input_shape_ = convert(Tensor{Int64}, input_shape_)
+ begin
+ end
+ end
+ begin
+ reduction_axes_ = convert(Tensor{Int32}, reduction_axes_)
+ begin
+ end
+ end
+ begin
+ (input_values_,) = tf.tf_promote(input_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("SparseReduceSum")
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_values_ = convert(tf.EagerTensor, input_values_)
+ input_shape_ = convert(tf.EagerTensor, input_shape_)
+ reduction_axes_ = convert(tf.EagerTensor, reduction_axes_)
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ else
+ sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
"""
- neg(x)
+    string_length(input; unit="BYTE")
"""
-tf.@op function neg(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Neg")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Neg")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function string_length_graph(input_; name=nothing, unit=nothing)
+ local desc
+ tf.with_op_name(name, "StringLength") do
+ desc = tf.NodeDescription("StringLength")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if unit !== nothing
+ desc["unit"] = Base.String(unit)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_length_eager(input_; name=nothing, unit=nothing)
+ desc = tf.EagerOp("StringLength")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if unit !== nothing
+ desc["unit"] = Base.String(unit)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_length(input_; name=nothing, unit=nothing)
+ if tf.in_eager_mode()
+ string_length_eager(input_; name=name, unit=unit)
+ else
+ string_length_graph(input_; name=name, unit=unit)
+ end
+ end
end
+end
+
"""
- square(x)
+ filter_dataset(input_dataset, other_arguments)
"""
-tf.@op function square(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Square")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Square")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "FilterDataset") do
+ desc = tf.NodeDescription("FilterDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if predicate !== nothing
+ desc["predicate"] = Base.identity(predicate)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("FilterDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if predicate !== nothing
+ desc["predicate"] = Base.identity(predicate)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ filter_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ else
+ filter_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
end
+end
+
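+# Attribute-coercion convention visible in `filter_dataset` above: string attributes
+# pass through `Base.String`, integers through `Base.Int`, booleans through
+# `Base.Bool`, and list attributes through `map(Base.identity, ...)`; values with no
+# scalar coercion, such as the function-valued `predicate` or dtypes and shapes
+# elsewhere, are forwarded unmodified via `Base.identity`.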
"""
- shape(input; out_type=Int32)
+ conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1])
"""
-tf.@op function shape(input_; name=nothing, out_type=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Shape")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
+begin
+ begin
+ function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv3D") do
+ desc = tf.NodeDescription("Conv3D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_) = tf.tf_promote(input_, filter_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv3D")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ begin
+ begin
tf.add_input(desc, input_)
- if out_type !== nothing
- desc["out_type"] = Base.identity(out_type)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
end
- end), name, "Shape")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- unsorted_segment_sum(data, segment_ids, num_segments)
-
-
-"""
-tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("UnsortedSegmentSum")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- num_segments_ = convert(TensorFlow.Tensor{Int32}, num_segments_)
- (num_segments_,) = tf.tf_promote(num_segments_)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- tf.add_input(desc, num_segments_)
- end), name, "UnsortedSegmentSum")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv3d_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ conv3d_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
end
+end
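+# A minimal graph-mode sketch for `conv3d` above (hypothetical shapes; `strides` and
+# `padding` default to `nothing` in the wrapper but the Conv3D kernel requires them,
+# and the leading/trailing 1s in `strides` are the batch and channel strides of the
+# NDHWC layout):
+#
+#     # x = placeholder(Float32, shape=[1, 8, 8, 8, 1])   # batch, d, h, w, channels
+#     # w = placeholder(Float32, shape=[3, 3, 3, 1, 4])   # d, h, w, in, out
+#     # y = conv3d(x, w; strides=[1, 1, 1, 1, 1], padding="SAME")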
-"""
- unsorted_segment_max(data, segment_ids, num_segments)
+"""
+ retrieve_tpu_embedding_adagrad_parameters(; table_id=-1, table_name=)
+Retrieve embedding parameters for a single table.
"""
-tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("UnsortedSegmentMax")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- num_segments_ = convert(TensorFlow.Tensor{Int32}, num_segments_)
- (num_segments_,) = tf.tf_promote(num_segments_)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- tf.add_input(desc, num_segments_)
- end), name, "UnsortedSegmentMax")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
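+# Multi-output pattern: `RetrieveTPUEmbeddingAdagradParameters` returns two tensors
+# (the retrieved parameters and their Adagrad accumulators), so the graph builder
+# collects `tf.Tensor(op, out_idx)` for each output into a vector and the eager
+# builder returns the whole `res` from `tf.execute` rather than `res[1]`.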
"""
- segment_sum(data, segment_ids)
+ optional_has_value(optional)
"""
-tf.@op function segment_sum(data_, segment_ids_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SegmentSum")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- end), name, "SegmentSum")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function optional_has_value_graph(optional_; name=nothing)
+ local desc
+ tf.with_op_name(name, "OptionalHasValue") do
+ desc = tf.NodeDescription("OptionalHasValue")
+ begin
+ begin
+ optional_ = convert(Tensor{Any}, optional_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, optional_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- segment_max(data, segment_ids)
+ begin
+ function optional_has_value_eager(optional_; name=nothing)
+ desc = tf.EagerOp("OptionalHasValue")
+ optional_ = convert(tf.EagerTensor, optional_)
+ begin
+ begin
+ tf.add_input(desc, optional_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(optional_has_value, [optional_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_has_value(optional_; name=nothing)
+ if tf.in_eager_mode()
+ optional_has_value_eager(optional_; name=name)
+ else
+ optional_has_value_graph(optional_; name=name)
+ end
+ end
+ end
+end
"""
-tf.@op function segment_max(data_, segment_ids_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SegmentMax")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- end), name, "SegmentMax")
- tf.Tensor(tf.Operation(desc))
- end
+ apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false, use_nesterov=false)
+
"""
- segment_mean(data, segment_ids)
-
-
-"""
-tf.@op function segment_mean(data_, segment_ids_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SegmentMean")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- end), name, "SegmentMean")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyAdam") do
+ desc = tf.NodeDescription("ApplyAdam")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ beta1_power_ = convert(Tensor{Any}, beta1_power_)
+ begin
+ end
+ end
+ begin
+ beta2_power_ = convert(Tensor{Any}, beta2_power_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ beta1_ = convert(Tensor{Any}, beta1_)
+ begin
+ end
+ end
+ begin
+ beta2_ = convert(Tensor{Any}, beta2_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, beta2_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- segment_min(data, segment_ids)
-
-
-"""
-tf.@op function segment_min(data_, segment_ids_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SegmentMin")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- end), name, "SegmentMin")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ApplyAdam")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ beta1_power_ = convert(tf.EagerTensor, beta1_power_)
+ beta2_power_ = convert(tf.EagerTensor, beta2_power_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ beta1_ = convert(tf.EagerTensor, beta1_)
+ beta2_ = convert(tf.EagerTensor, beta2_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, beta2_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(m_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- segment_prod(data, segment_ids)
-
-
-"""
-tf.@op function segment_prod(data_, segment_ids_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SegmentProd")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_)
- segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
- (data_,) = tf.tf_promote(data_)
- (segment_ids_,) = tf.tf_promote(segment_ids_)
- tf.add_input(desc, data_)
- tf.add_input(desc, segment_ids_)
- end), name, "SegmentProd")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
end
+end
+
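+# `ApplyAdam` fuses the standard Adam update into a single kernel. As a sketch of
+# the update it applies (per the TensorFlow kernel's documented semantics):
+#
+#     # lr_t  = lr * sqrt(1 - beta2_power) / (1 - beta1_power)
+#     # m    .= beta1 .* m .+ (1 - beta1) .* grad
+#     # v    .= beta2 .* v .+ (1 - beta2) .* grad .^ 2
+#     # var .-= lr_t .* m ./ (sqrt.(v) .+ epsilon)
+#
+# With `use_nesterov=true` the last line uses `beta1 .* m .+ (1 - beta1) .* grad`
+# in place of `m`.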
"""
- relu(features)
+ cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=0, seed=0, seed2=0)
"""
-tf.@op function relu(features_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Relu")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- (features_,) = tf.tf_promote(features_)
- tf.add_input(desc, features_)
- end), name, "Relu")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNNParamsToCanonical") do
+ desc = tf.NodeDescription("CudnnRNNParamsToCanonical")
+ begin
+ begin
+ num_layers_ = convert(Tensor{Int32}, num_layers_)
+ begin
+ end
+ end
+ begin
+ num_units_ = convert(Tensor{Int32}, num_units_)
+ begin
+ end
+ end
+ begin
+ input_size_ = convert(Tensor{Int32}, input_size_)
+ begin
+ end
+ end
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ (params_,) = tf.tf_promote(params_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, num_layers_)
+ end
+ begin
+ tf.add_input(desc, num_units_)
+ end
+ begin
+ tf.add_input(desc, input_size_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ end
+ begin
+ begin
+ if num_params !== nothing
+ desc["num_params"] = Base.Int(num_params)
+ end
+ end
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("CudnnRNNParamsToCanonical")
+ num_layers_ = convert(tf.EagerTensor, num_layers_)
+ num_units_ = convert(tf.EagerTensor, num_units_)
+ input_size_ = convert(tf.EagerTensor, input_size_)
+ params_ = convert(tf.EagerTensor, params_)
+ begin
+ begin
+ tf.add_input(desc, num_layers_)
+ end
+ begin
+ tf.add_input(desc, num_units_)
+ end
+ begin
+ tf.add_input(desc, input_size_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ end
+ begin
+ begin
+ if num_params !== nothing
+ desc["num_params"] = Base.Int(num_params)
+ end
+ end
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(params_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cudnn_rnn_params_to_canonical, [num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ else
+ cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
"""
- relu6(features)
+ irfft3d(input, fft_length)
"""
-tf.@op function relu6(features_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Relu6")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- (features_,) = tf.tf_promote(features_)
- tf.add_input(desc, features_)
- end), name, "Relu6")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function irfft3d_graph(input_, fft_length_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IRFFT3D") do
+ desc = tf.NodeDescription("IRFFT3D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ fft_length_ = convert(Tensor{Int32}, fft_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
+ begin
+ function irfft3d_eager(input_, fft_length_; name=nothing)
+ desc = tf.EagerOp("IRFFT3D")
+ input_ = convert(tf.EagerTensor, input_)
+ fft_length_ = convert(tf.EagerTensor, fft_length_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft3d(input_, fft_length_; name=nothing)
+ if tf.in_eager_mode()
+ irfft3d_eager(input_, fft_length_; name=name)
+ else
+ irfft3d_graph(input_, fft_length_; name=name)
+ end
+ end
+ end
+end
+
"""
- elu(features)
+ angle(input)
"""
-tf.@op function elu(features_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Elu")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- (features_,) = tf.tf_promote(features_)
- tf.add_input(desc, features_)
- end), name, "Elu")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function angle_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Angle") do
+ desc = tf.NodeDescription("Angle")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function angle_eager(input_; name=nothing)
+ desc = tf.EagerOp("Angle")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(angle, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function angle(input_; name=nothing)
+ if tf.in_eager_mode()
+ angle_eager(input_; name=name)
+ else
+ angle_graph(input_; name=name)
+ end
+ end
end
+end
+
"""
- softplus(features)
+ tensor_forest_tree_resource_handle_op(; container=, shared_name=)
"""
-tf.@op function softplus(features_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Softplus")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- (features_,) = tf.tf_promote(features_)
- tf.add_input(desc, features_)
- end), name, "Softplus")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do
+ desc = tf.NodeDescription("TensorForestTreeResourceHandleOp")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_forest_tree_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("TensorForestTreeResourceHandleOp")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_tree_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ tensor_forest_tree_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
end
+end
+
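+# Nullary pattern: resource-handle ops such as the one above take no tensor inputs,
+# which is why the input-conversion and `add_input` stages are empty `begin ... end`
+# blocks; the generator emits every stage unconditionally, leaving unused ones
+# empty, so only the attribute stage (`container`, `shared_name`) does any work.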
"""
- softsign(features)
+ learned_unigram_candidate_sampler(true_classes; seed=0, seed2=0)
"""
-tf.@op function softsign(features_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Softsign")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- (features_,) = tf.tf_promote(features_)
- tf.add_input(desc, features_)
- end), name, "Softsign")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "LearnedUnigramCandidateSampler") do
+ desc = tf.NodeDescription("LearnedUnigramCandidateSampler")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("LearnedUnigramCandidateSampler")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ learned_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ else
+ learned_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ end
+ end
end
+end
-"""
- softmax(logits)
+"""
+ _arg()
+A graph node which represents an argument to a function.
"""
-tf.@op function softmax(logits_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Softmax")
- logits_ = convert(TensorFlow.Tensor{Any}, logits_)
- (logits_,) = tf.tf_promote(logits_)
- tf.add_input(desc, logits_)
- end), name, "Softmax")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function _arg_graph(; name=nothing, index=nothing)
+ local desc
+ tf.with_op_name(name, "_Arg") do
+ desc = tf.NodeDescription("_Arg")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _arg_eager(; name=nothing, index=nothing)
+ desc = tf.EagerOp("_Arg")
+ begin
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_arg, [], name=nothing, index=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _arg(; name=nothing, index=nothing)
+ if tf.in_eager_mode()
+ _arg_eager(; name=name, index=index)
+ else
+ _arg_graph(; name=name, index=index)
+ end
+ end
+ end
+end
+
"""
- sigmoid(x)
+ matrix_square_root(input)
"""
-tf.@op function sigmoid(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Sigmoid")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Sigmoid")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function matrix_square_root_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixSquareRoot") do
+ desc = tf.NodeDescription("MatrixSquareRoot")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_square_root_eager(input_; name=nothing)
+ desc = tf.EagerOp("MatrixSquareRoot")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_square_root, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_square_root(input_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_square_root_eager(input_; name=name)
+ else
+ matrix_square_root_graph(input_; name=name)
+ end
+ end
+ end
+end
+
"""
- conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1])
+ sparse_dense_cwise_mul(sp_indices, sp_values, sp_shape, dense)
"""
-tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Conv3D")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- filter_ = convert(TensorFlow.Tensor{Any}, filter_)
- (input_, filter_) = tf.tf_promote(input_, filter_)
- tf.add_input(desc, input_)
- tf.add_input(desc, filter_)
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+begin
+ begin
+ function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseDenseCwiseMul") do
+ desc = tf.NodeDescription("SparseDenseCwiseMul")
+ begin
+ begin
+ sp_indices_ = convert(Tensor{Int64}, sp_indices_)
+ begin
+ end
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
+ begin
+ sp_values_ = convert(Tensor{Any}, sp_values_)
+ begin
+ end
end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
+ begin
+ sp_shape_ = convert(Tensor{Int64}, sp_shape_)
+ begin
+ end
end
- if dilations !== nothing
- desc["dilations"] = map(Base.identity, dilations)
+ begin
+ dense_ = convert(Tensor{Any}, dense_)
+ begin
+ end
+ end
+ begin
+ (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
end
- end), name, "Conv3D")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ begin
+ tf.add_input(desc, dense_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ desc = tf.EagerOp("SparseDenseCwiseMul")
+ sp_indices_ = convert(tf.EagerTensor, sp_indices_)
+ sp_values_ = convert(tf.EagerTensor, sp_values_)
+ sp_shape_ = convert(tf.EagerTensor, sp_shape_)
+ dense_ = convert(tf.EagerTensor, dense_)
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ begin
+ tf.add_input(desc, dense_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(sp_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(dense_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
+ else
+ sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
+ end
+ end
end
+end
+
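+# Note that `tf.tf_promote` is applied only to the value-carrying operands
+# (`sp_values_` and `dense_` above) so they agree on the element type "T"; the
+# index and shape operands keep their fixed integer types and are excluded from
+# promotion.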
"""
- max_pool(input; data_format=NHWC)
+ tensor_array_concat_v3(handle, flow_in; element_shape_except0=?)
"""
-tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MaxPool")
- input_ = convert(TensorFlow.Tensor{Float32}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if ksize !== nothing
- desc["ksize"] = map(Base.identity, ksize)
+begin
+ begin
+ function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayConcatV3") do
+ desc = tf.NodeDescription("TensorArrayConcatV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
end
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape_except0 !== nothing
+ desc["element_shape_except0"] = Base.identity(element_shape_except0)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ desc = tf.EagerOp("TensorArrayConcatV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape_except0 !== nothing
+ desc["element_shape_except0"] = Base.identity(element_shape_except0)
end
- end), name, "MaxPool")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ if tf.in_eager_mode()
+ tensor_array_concat_v3_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0)
+ else
+ tensor_array_concat_v3_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0)
+ end
+ end
end
+end
+
"""
- max_pool3d(input; data_format=NDHWC)
+ unicode_script(input)
"""
-tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MaxPool3D")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if ksize !== nothing
- desc["ksize"] = map(Base.identity, ksize)
- end
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+begin
+ begin
+ function unicode_script_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnicodeScript") do
+ desc = tf.NodeDescription("UnicodeScript")
+ begin
+ begin
+ input_ = convert(Tensor{Int32}, input_)
+ begin
+ end
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
- end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
end
- end), name, "MaxPool3D")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unicode_script_eager(input_; name=nothing)
+ desc = tf.EagerOp("UnicodeScript")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unicode_script, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_script(input_; name=nothing)
+ if tf.in_eager_mode()
+ unicode_script_eager(input_; name=name)
+ else
+ unicode_script_graph(input_; name=name)
+ end
+ end
end
+end
+
"""
- avg_pool(value; data_format=NHWC)
+ batch_cholesky_grad(l, grad)
"""
-tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AvgPool")
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (value_,) = tf.tf_promote(value_)
- tf.add_input(desc, value_)
- if ksize !== nothing
- desc["ksize"] = map(Base.identity, ksize)
+begin
+ begin
+ function batch_cholesky_grad_graph(l_, grad_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchCholeskyGrad") do
+ desc = tf.NodeDescription("BatchCholeskyGrad")
+ begin
+ begin
+ l_ = convert(Tensor{Any}, l_)
+ begin
+ end
end
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
+ begin
+ (l_, grad_) = tf.tf_promote(l_, grad_)
end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
+ end
+ begin
+ begin
+ tf.add_input(desc, l_)
+ end
+ begin
+ tf.add_input(desc, grad_)
end
- end), name, "AvgPool")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_cholesky_grad_eager(l_, grad_; name=nothing)
+ desc = tf.EagerOp("BatchCholeskyGrad")
+ l_ = convert(tf.EagerTensor, l_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, l_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(l_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky_grad(l_, grad_; name=nothing)
+ if tf.in_eager_mode()
+ batch_cholesky_grad_eager(l_, grad_; name=name)
+ else
+ batch_cholesky_grad_graph(l_, grad_; name=name)
+ end
+ end
end
+end
+
"""
- avg_pool3d(input; data_format=NDHWC)
+ mean(input, reduction_indices; keep_dims=false)
"""
-tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AvgPool3D")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if ksize !== nothing
- desc["ksize"] = map(Base.identity, ksize)
+begin
+ begin
+ function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Mean") do
+ desc = tf.NodeDescription("Mean")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
end
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
+ begin
+ (input_,) = tf.tf_promote(input_)
end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("Mean")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
end
- end), name, "AvgPool3D")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ mean_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ mean_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
end
+end
+
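+# Axis-index shift: the graph builder above subtracts 1 from `reduction_indices_`,
+# so Julia callers pass 1-based axes while the kernel sees 0-based ones; as
+# generated, the eager builder forwards the indices unshifted. A hedged graph-mode
+# sketch (hypothetical values):
+#
+#     # x = constant([1.0 2.0; 3.0 4.0])
+#     # m = mean(x, 2)                   # reduce over the second (Julia) dimension
+#     # m = mean(x, 2; keep_dims=true)   # keep the reduced dimension as size 1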
"""
- log_softmax(logits)
+ batch_fft(input)
"""
-tf.@op function log_softmax(logits_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LogSoftmax")
- logits_ = convert(TensorFlow.Tensor{Any}, logits_)
- (logits_,) = tf.tf_promote(logits_)
- tf.add_input(desc, logits_)
- end), name, "LogSoftmax")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function batch_fft_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchFFT") do
+ desc = tf.NodeDescription("BatchFFT")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_fft_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchFFT")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_fft, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_fft_eager(input_; name=name)
+ else
+ batch_fft_graph(input_; name=name)
+ end
+ end
+ end
+end
+
"""
- dilation2d(input, filter)
+ sin(x)
"""
-tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Dilation2D")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- filter_ = convert(TensorFlow.Tensor{Any}, filter_)
- (input_, filter_) = tf.tf_promote(input_, filter_)
- tf.add_input(desc, input_)
- tf.add_input(desc, filter_)
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+begin
+ begin
+ function sin_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Sin") do
+ desc = tf.NodeDescription("Sin")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
end
- if rates !== nothing
- desc["rates"] = map(Base.identity, rates)
+ begin
+ (x_,) = tf.tf_promote(x_)
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sin_eager(x_; name=nothing)
+ desc = tf.EagerOp("Sin")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sin, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sin(x_; name=nothing)
+ if tf.in_eager_mode()
+ sin_eager(x_; name=name)
+ else
+ sin_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
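+# Every eager builder ends the same way: `tf.execute(desc)` runs the kernel and
+# `tf.add_node(res[1], node)` attaches the `TapeNode` to the output so a gradient
+# tape can later replay the call; for `sin` above the recorded node is
+# `tf.TapeNode(sin, [x_], name=nothing, res)`, i.e. the op, its inputs, and its
+# results.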
+"""
+ boosted_trees_ensemble_resource_handle_op(; container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do
+ desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_ensemble_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("BoostedTreesEnsembleResourceHandleOp")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
end
- end), name, "Dilation2D")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_ensemble_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ boosted_trees_ensemble_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
end
+end
+
"""
- conv2d(input, filter; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1])
+ quantized_max_pool(input, min_input, max_input)
"""
-tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Conv2D")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- filter_ = convert(TensorFlow.Tensor{Any}, filter_)
- (input_, filter_) = tf.tf_promote(input_, filter_)
+begin
+ begin
+ function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedMaxPool") do
+ desc = tf.NodeDescription("QuantizedMaxPool")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ min_input_ = convert(Tensor{Float32}, min_input_)
+ begin
+ end
+ end
+ begin
+ max_input_ = convert(Tensor{Float32}, max_input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("QuantizedMaxPool")
+ input_ = convert(tf.EagerTensor, input_)
+ min_input_ = convert(tf.EagerTensor, min_input_)
+ max_input_ = convert(tf.EagerTensor, max_input_)
+ begin
+ begin
tf.add_input(desc, input_)
- tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
if strides !== nothing
desc["strides"] = map(Base.identity, strides)
end
- if use_cudnn_on_gpu !== nothing
- desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
- end
+ end
+ begin
if padding !== nothing
desc["padding"] = Base.String(padding)
end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
- end
- if dilations !== nothing
- desc["dilations"] = map(Base.identity, dilations)
- end
- end), name, "Conv2D")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ quantized_max_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding)
+ else
+ quantized_max_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
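+# `QuantizedMaxPool` is another multi-output op: the three tensors gathered with
+# `out_idx = 1:3` are the pooled output followed by the `min_output` and
+# `max_output` scalars that bound its quantization range.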
"""
- random_uniform(shape; seed=0, seed2=0)
+ ordered_map_stage(key, indices, values; capacity=0, memory_limit=0, container=, shared_name=)
"""
-tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("RandomUniform")
- shape_ = convert(TensorFlow.Tensor{Any}, shape_)
- (shape_,) = tf.tf_promote(shape_)
- tf.add_input(desc, shape_)
- if seed !== nothing
- desc["seed"] = Base.Int(seed)
+begin
+ begin
+ function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapStage") do
+ desc = tf.NodeDescription("OrderedMapStage")
+ begin
+ begin
+ key_ = convert(Tensor{Int64}, key_)
+ begin
+ end
end
- if seed2 !== nothing
- desc["seed2"] = Base.Int(seed2)
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
end
- if dtype !== nothing
- desc["dtype"] = Base.identity(dtype)
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if fake_dtypes !== nothing
+ desc["fake_dtypes"] = map(Base.identity, fake_dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapStage")
+ key_ = convert(tf.EagerTensor, key_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if fake_dtypes !== nothing
+ desc["fake_dtypes"] = map(Base.identity, fake_dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
end
- end), name, "RandomUniform")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name)
+ end
+ end
end
+end
+
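One asymmetry worth noting in the generated pair above: the graph builder converts list-valued inputs element-wise ([convert(Tensor{Any}, x) for x = values_]), while ordered_map_stage_eager pushes the whole collection through a single convert(tf.EagerTensor, values_). The dispatch itself is uniform either way; a hypothetical call, with names and dtypes purely illustrative:

    # Stages one entry keyed by 1; capacity and dtypes are op attributes,
    # only written into desc when they are not `nothing`.
    ordered_map_stage(constant(Int64(1)), constant(Int32(0)),
                      [constant(1.0f0)];
                      capacity=10, dtypes=[Float32])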
"""
- random_standard_normal(shape; seed=0, seed2=0)
+ partitioned_call(args; config="", config_proto="", executor_type="")
"""
-tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("RandomStandardNormal")
- shape_ = convert(TensorFlow.Tensor{Any}, shape_)
- (shape_,) = tf.tf_promote(shape_)
- tf.add_input(desc, shape_)
- if seed !== nothing
- desc["seed"] = Base.Int(seed)
+begin
+ begin
+ function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+ local desc
+ tf.with_op_name(name, "PartitionedCall") do
+ desc = tf.NodeDescription("PartitionedCall")
+ begin
+ begin
+ args_ = [convert(Tensor{Any}, x) for x = args_]
+ begin
+ end
end
- if seed2 !== nothing
- desc["seed2"] = Base.Int(seed2)
+ end
+ begin
+ begin
+ tf.add_input(desc, args_)
end
- if dtype !== nothing
- desc["dtype"] = Base.identity(dtype)
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
end
- end), name, "RandomStandardNormal")
- tf.Tensor(tf.Operation(desc))
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ begin
+ if config_proto !== nothing
+ desc["config_proto"] = Base.String(config_proto)
+ end
+ end
+ begin
+ if executor_type !== nothing
+ desc["executor_type"] = Base.String(executor_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+ desc = tf.EagerOp("PartitionedCall")
+ args_ = convert(tf.EagerTensor, args_)
+ begin
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ begin
+ if config_proto !== nothing
+ desc["config_proto"] = Base.String(config_proto)
+ end
+ end
+ begin
+ if executor_type !== nothing
+ desc["executor_type"] = Base.String(executor_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+ if tf.in_eager_mode()
+ partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type)
+ else
+ partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type)
+ end
+ end
+ end
+end
+
"""
- random_shuffle(value; seed=0, seed2=0)
+ sparse_apply_adagrad(var, accum, lr, grad, indices; use_locking=false, update_slots=true)
"""
-tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("RandomShuffle")
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (value_,) = tf.tf_promote(value_)
- tf.add_input(desc, value_)
- if seed !== nothing
- desc["seed"] = Base.Int(seed)
+begin
+ begin
+ function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyAdagrad") do
+ desc = tf.NodeDescription("SparseApplyAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
end
- if seed2 !== nothing
- desc["seed2"] = Base.Int(seed2)
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
end
- end), name, "RandomShuffle")
- tf.Tensor(tf.Operation(desc))
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+ desc = tf.EagerOp("SparseApplyAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots)
+ else
+ sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots)
+ end
+ end
end
+end
+
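The generated converters also encode the 1-based/0-based boundary: indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) shifts the caller's 1-based Julia indices down to the 0-based row numbers the SparseApplyAdagrad kernel expects, so Julia code keeps its native convention. Illustratively (a sketch of caller intent, not code from this file):

    # From the Julia side, update rows 1 and 3 of var/accum;
    # after the subtraction above, the kernel sees indices [0, 2].
    # sparse_apply_adagrad(var, accum, 0.1, grad, constant([1, 3]))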
"""
- strided_slice(input, begin, end, strides; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0)
+ decode_proto_v2(bytes; descriptor_source="local://", message_format="binary", sanitize=false)
"""
-tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("StridedSlice")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- begin_ = convert(TensorFlow.Tensor{Any}, begin_)
- begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
- end_ = convert(TensorFlow.Tensor{Any}, end_)
- end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1)
- strides_ = convert(TensorFlow.Tensor{Any}, strides_)
- strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1)
- (input_,) = tf.tf_promote(input_)
- (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_)
- tf.add_input(desc, input_)
- tf.add_input(desc, begin_)
- tf.add_input(desc, end_)
- tf.add_input(desc, strides_)
- if Index !== nothing
- desc["Index"] = Base.identity(Index)
+begin
+ begin
+ function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeProtoV2") do
+ desc = tf.NodeDescription("DecodeProtoV2")
+ begin
+ begin
+ bytes_ = convert(Tensor{String}, bytes_)
+ begin
+ end
end
- if begin_mask !== nothing
- begin_mask = Base.Int(begin_mask) - 1
+ end
+ begin
+ begin
+ tf.add_input(desc, bytes_)
end
- if begin_mask !== nothing
- desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ begin
+ begin
+ if message_type !== nothing
+ desc["message_type"] = Base.String(message_type)
+ end
end
- if end_mask !== nothing
- end_mask = Base.Int(end_mask) - 1
+ begin
+ if field_names !== nothing
+ desc["field_names"] = map(Base.identity, field_names)
+ end
end
- if end_mask !== nothing
- desc["end_mask"] = Base.Int(end_mask)
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
end
- if ellipsis_mask !== nothing
- ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ begin
+ if descriptor_source !== nothing
+ desc["descriptor_source"] = Base.String(descriptor_source)
+ end
end
- if ellipsis_mask !== nothing
- desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ begin
+ if message_format !== nothing
+ desc["message_format"] = Base.String(message_format)
+ end
end
- if new_axis_mask !== nothing
- new_axis_mask = Base.Int(new_axis_mask) - 1
+ begin
+ if sanitize !== nothing
+ desc["sanitize"] = Base.Bool(sanitize)
+ end
end
- if new_axis_mask !== nothing
- desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing)
+ desc = tf.EagerOp("DecodeProtoV2")
+ bytes_ = convert(tf.EagerTensor, bytes_)
+ begin
+ begin
+ tf.add_input(desc, bytes_)
+ end
+ end
+ begin
+ begin
+ if message_type !== nothing
+ desc["message_type"] = Base.String(message_type)
end
- if shrink_axis_mask !== nothing
- shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ begin
+ if field_names !== nothing
+ desc["field_names"] = map(Base.identity, field_names)
end
- if shrink_axis_mask !== nothing
- desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if descriptor_source !== nothing
+ desc["descriptor_source"] = Base.String(descriptor_source)
+ end
+ end
+ begin
+ if message_format !== nothing
+ desc["message_format"] = Base.String(message_format)
+ end
+ end
+ begin
+ if sanitize !== nothing
+ desc["sanitize"] = Base.Bool(sanitize)
end
- end), name, "StridedSlice")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing)
+ if tf.in_eager_mode()
+ decode_proto_v2_eager(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize)
+ else
+ decode_proto_v2_graph(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize)
+ end
+ end
end
+end
+
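DecodeProtoV2 is a multi-output op, so the graph path collects one tf.Tensor per output index (the 1:2 loop) and both paths return the whole result vector rather than res[1]. Assuming TensorFlow's documented output order for this op (sizes first, then the decoded field values), a caller would destructure it:

    out = decode_proto_v2(serialized;
                          message_type="SomeMessage",   # hypothetical message type
                          field_names=["f"],
                          output_types=[Int64])
    sizes, values = out[1], out[2]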
"""
- expand_dims(input, dim)
+ betainc(a, b, x)
"""
-tf.@op function expand_dims(input_, dim_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ExpandDims")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- dim_ = convert(TensorFlow.Tensor{Int32}, dim_)
- dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1)
- (input_,) = tf.tf_promote(input_)
- (dim_,) = tf.tf_promote(dim_)
- tf.add_input(desc, input_)
- tf.add_input(desc, dim_)
- end), name, "ExpandDims")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function betainc_graph(a_, b_, x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Betainc") do
+ desc = tf.NodeDescription("Betainc")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ b_ = convert(Tensor{Any}, b_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (a_, b_, x_) = tf.tf_promote(a_, b_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function betainc_eager(a_, b_, x_; name=nothing)
+ desc = tf.EagerOp("Betainc")
+ a_ = convert(tf.EagerTensor, a_)
+ b_ = convert(tf.EagerTensor, b_)
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function betainc(a_, b_, x_; name=nothing)
+ if tf.in_eager_mode()
+ betainc_eager(a_, b_, x_; name=name)
+ else
+ betainc_graph(a_, b_, x_; name=name)
+ end
+ end
+ end
+end
+
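Betainc requires its three inputs to share one element type, and tf.tf_promote(a_, b_, x_) is what makes mixed calls work: plain Julia numbers and tensors of differing precision are widened to a common dtype before being wired into the node. For example (a sketch):

    # The Float64 tensor wins the promotion; the literal 0.5 is
    # converted and promoted to a Float64 tensor as well.
    b = betainc(0.5, constant(2.0), constant([0.25, 0.75]))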
"""
- tile(input, multiples)
+ guarantee_const(input)
"""
-tf.@op function tile(input_, multiples_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Tile")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- multiples_ = convert(TensorFlow.Tensor{Int32}, multiples_)
- (input_,) = tf.tf_promote(input_)
- (multiples_,) = tf.tf_promote(multiples_)
+begin
+ begin
+ function guarantee_const_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GuaranteeConst") do
+ desc = tf.NodeDescription("GuaranteeConst")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function guarantee_const_eager(input_; name=nothing)
+ desc = tf.EagerOp("GuaranteeConst")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
tf.add_input(desc, input_)
- tf.add_input(desc, multiples_)
- end), name, "Tile")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(guarantee_const, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function guarantee_const(input_; name=nothing)
+ if tf.in_eager_mode()
+ guarantee_const_eager(input_; name=name)
+ else
+ guarantee_const_graph(input_; name=name)
+ end
+ end
end
+end
+
"""
- pad(input, paddings)
+ decode_bmp(contents; channels=0)
"""
-tf.@op function pad(input_, paddings_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Pad")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- paddings_ = convert(TensorFlow.Tensor{Int32}, paddings_)
- (input_,) = tf.tf_promote(input_)
- (paddings_,) = tf.tf_promote(paddings_)
- tf.add_input(desc, input_)
- tf.add_input(desc, paddings_)
- end), name, "Pad")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function decode_bmp_graph(contents_; name=nothing, channels=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeBmp") do
+ desc = tf.NodeDescription("DecodeBmp")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_bmp_eager(contents_; name=nothing, channels=nothing)
+ desc = tf.EagerOp("DecodeBmp")
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_bmp(contents_; name=nothing, channels=nothing)
+ if tf.in_eager_mode()
+ decode_bmp_eager(contents_; name=name, channels=channels)
+ else
+ decode_bmp_graph(contents_; name=name, channels=channels)
+ end
+ end
end
+end
+
"""
- gather(params, indices; validate_indices=true)
+ boosted_trees_bucketize(float_values, bucket_boundaries)
"""
-tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Gather")
- params_ = convert(TensorFlow.Tensor{Any}, params_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- (params_,) = tf.tf_promote(params_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, params_)
- tf.add_input(desc, indices_)
- if validate_indices !== nothing
- desc["validate_indices"] = Base.Bool(validate_indices)
+begin
+ begin
+ function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesBucketize") do
+ desc = tf.NodeDescription("BoostedTreesBucketize")
+ begin
+ begin
+ float_values_ = [convert(Tensor{Float32}, x) for x = float_values_]
+ begin
+ end
+ end
+ begin
+ bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, float_values_)
+ end
+ begin
+ tf.add_input(desc, bucket_boundaries_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_features
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesBucketize")
+ float_values_ = convert(tf.EagerTensor, float_values_)
+ bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_)
+ begin
+ begin
+ tf.add_input(desc, float_values_)
+ end
+ begin
+ tf.add_input(desc, bucket_boundaries_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
end
- end), name, "Gather")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=name, num_features=num_features)
+ else
+ boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=name, num_features=num_features)
+ end
+ end
end
+end
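Here the output arity is not fixed: the 1:num_features loop sizes the returned vector from the num_features attribute, so although the attribute is syntactically optional, in graph mode it must actually be supplied or the loop has no bound. A hypothetical single-feature call:

    buckets = boosted_trees_bucketize([constant(Float32[1, 2, 3])],
                                      [constant(Float32[1.5])];
                                      num_features=1)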
-"""
- gather_nd(params, indices)
+"""
+ shutdown_distributed_tpu()
+An op that shuts down a running distributed TPU system. The Op returns an error if no system is running.
"""
-tf.@op function gather_nd(params_, indices_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("GatherNd")
- params_ = convert(TensorFlow.Tensor{Any}, params_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- (params_,) = tf.tf_promote(params_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, params_)
- tf.add_input(desc, indices_)
- end), name, "GatherNd")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function shutdown_distributed_tpu_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "ShutdownDistributedTPU") do
+ desc = tf.NodeDescription("ShutdownDistributedTPU")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function shutdown_distributed_tpu_eager(; name=nothing)
+ desc = tf.EagerOp("ShutdownDistributedTPU")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shutdown_distributed_tpu(; name=nothing)
+ if tf.in_eager_mode()
+ shutdown_distributed_tpu_eager(; name=name)
+ else
+ shutdown_distributed_tpu_graph(; name=name)
+ end
+ end
end
+end
+
"""
- scatter_nd(indices, updates, shape)
+ experimental_stats_aggregator_summary(iterator)
"""
-tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScatterNd")
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- updates_ = convert(TensorFlow.Tensor{Any}, updates_)
- shape_ = convert(TensorFlow.Tensor{Any}, shape_)
- (updates_,) = tf.tf_promote(updates_)
- (indices_, shape_) = tf.tf_promote(indices_, shape_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, updates_)
- tf.add_input(desc, shape_)
- end), name, "ScatterNd")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function experimental_stats_aggregator_summary_graph(iterator_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do
+ desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary")
+ begin
+ begin
+ iterator_ = convert(Tensor{Any}, iterator_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_stats_aggregator_summary_eager(iterator_; name=nothing)
+ desc = tf.EagerOp("ExperimentalStatsAggregatorSummary")
+ iterator_ = convert(tf.EagerTensor, iterator_)
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_summary(iterator_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_stats_aggregator_summary_eager(iterator_; name=name)
+ else
+ experimental_stats_aggregator_summary_graph(iterator_; name=name)
+ end
+ end
end
+end
+
"""
- dynamic_partition(data, partitions)
+ timestamp()
"""
-tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("DynamicPartition")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- partitions_ = convert(TensorFlow.Tensor{Int32}, partitions_)
- (data_,) = tf.tf_promote(data_)
- tf.add_input(desc, data_)
- tf.add_input(desc, partitions_)
- if num_partitions !== nothing
- desc["num_partitions"] = Base.Int(num_partitions)
- end
- end), name, "DynamicPartition")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:num_partitions
- push!(out, tf.Tensor(op, out_idx))
+begin
+ begin
+ function timestamp_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "Timestamp") do
+ desc = tf.NodeDescription("Timestamp")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function timestamp_eager(; name=nothing)
+ desc = tf.EagerOp("Timestamp")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(timestamp, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function timestamp(; name=nothing)
+ if tf.in_eager_mode()
+ timestamp_eager(; name=name)
+ else
+ timestamp_graph(; name=name)
+ end
+ end
+ end
+end
+
"""
- dynamic_stitch(indices, data)
+ matrix_exponential(input)
"""
-tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("DynamicStitch")
- indices_ = [convert(TensorFlow.Tensor{Int32}, x) for x = indices_]
- data_ = [convert(TensorFlow.Tensor{Any}, x) for x = data_]
- (data_,) = tf.tf_promote(data_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, data_)
- if N !== nothing
- desc["N"] = Base.Int(N)
+begin
+ begin
+ function matrix_exponential_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixExponential") do
+ desc = tf.NodeDescription("MatrixExponential")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
end
- end), name, "DynamicStitch")
- tf.Tensor(tf.Operation(desc))
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_exponential_eager(input_; name=nothing)
+ desc = tf.EagerOp("MatrixExponential")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_exponential, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_exponential(input_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_exponential_eager(input_; name=name)
+ else
+ matrix_exponential_graph(input_; name=name)
+ end
+ end
+ end
+end
+
"""
- pack(values; axis=0)
+ size(input; out_type=Int32)
"""
-tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Pack")
- values_ = [convert(TensorFlow.Tensor{Any}, x) for x = values_]
- (values_,) = tf.tf_promote(values_)
- tf.add_input(desc, values_)
- if N !== nothing
- desc["N"] = Base.Int(N)
+begin
+ begin
+ function size_graph(input_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "Size") do
+ desc = tf.NodeDescription("Size")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
end
- if axis !== nothing
- axis = Base.Int(axis) - 1
+ begin
+ (input_,) = tf.tf_promote(input_)
end
- if axis !== nothing
- desc["axis"] = Base.Int(axis)
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function size_eager(input_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("Size")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
end
- end), name, "Pack")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function size(input_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ size_eager(input_; name=name, out_type=out_type)
+ else
+ size_graph(input_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
"""
- concat_v2(values, axis)
+ add_n(inputs)
"""
-tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ConcatV2")
- values_ = [convert(TensorFlow.Tensor{Any}, x) for x = values_]
- axis_ = convert(TensorFlow.Tensor{Int32}, axis_)
- axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
- (values_,) = tf.tf_promote(values_)
- (axis_,) = tf.tf_promote(axis_)
- tf.add_input(desc, values_)
- tf.add_input(desc, axis_)
+begin
+ begin
+ function add_n_graph(inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "AddN") do
+ desc = tf.NodeDescription("AddN")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ (inputs_,) = tf.tf_promote(inputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function add_n_eager(inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("AddN")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
if N !== nothing
desc["N"] = Base.Int(N)
end
- end), name, "ConcatV2")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(inputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_n(inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ add_n_eager(inputs_; name=name, N=N)
+ else
+ add_n_graph(inputs_; name=name, N=N)
+ end
+ end
end
+end
+
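The eager variants also replace graph-time type inference: where the graph builder lets tf_promote fix the T attribute while constructing the node, add_n_eager reads it off the live inputs with desc["T"] = tf.data_type(inputs_) immediately before tf.execute. In a sketch like the following (assuming eager mode is on and the list conversion accepts a vector of tensors), T is derived from the runtime values:

    x = constant([1.0, 2.0])
    y = constant([3.0, 4.0])
    s = add_n([x, y]; N=2)    # T inferred as Float64 from x and y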
"""
- self_adjoint_eig_v2(input; compute_v=true)
+ sparse_segment_sum(data, indices, segment_ids)
"""
-tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SelfAdjointEigV2")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if compute_v !== nothing
- desc["compute_v"] = Base.Bool(compute_v)
+begin
+ begin
+ function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentSum") do
+ desc = tf.NodeDescription("SparseSegmentSum")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
end
- end), name, "SelfAdjointEigV2")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentSum")
+ data_ = convert(tf.EagerTensor, data_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_sum_eager(data_, indices_, segment_ids_; name=name)
+ else
+ sparse_segment_sum_graph(data_, indices_, segment_ids_; name=name)
+ end
+ end
+ end
+end
+
"""
- is_finite(x)
+ batch_dataset(input_dataset, batch_size)
"""
-tf.@op function is_finite(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("IsFinite")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "IsFinite")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "BatchDataset") do
+ desc = tf.NodeDescription("BatchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("BatchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ batch_dataset_eager(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ batch_dataset_graph(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
"""
- is_nan(x)
+ record_input(; file_random_seed=301, file_shuffle_shift_ratio=?, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="")
"""
-tf.@op function is_nan(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("IsNan")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "IsNan")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing)
+ local desc
+ tf.with_op_name(name, "RecordInput") do
+ desc = tf.NodeDescription("RecordInput")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if file_pattern !== nothing
+ desc["file_pattern"] = Base.String(file_pattern)
+ end
+ end
+ begin
+ if file_random_seed !== nothing
+ desc["file_random_seed"] = Base.Int(file_random_seed)
+ end
+ end
+ begin
+ if file_shuffle_shift_ratio !== nothing
+ desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio)
+ end
+ end
+ begin
+ if file_buffer_size !== nothing
+ desc["file_buffer_size"] = Base.Int(file_buffer_size)
+ end
+ end
+ begin
+ if file_parallelism !== nothing
+ desc["file_parallelism"] = Base.Int(file_parallelism)
+ end
+ end
+ begin
+ if batch_size !== nothing
+ desc["batch_size"] = Base.Int(batch_size)
+ end
+ end
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function record_input_eager(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing)
+ desc = tf.EagerOp("RecordInput")
+ begin
+ end
+ begin
+ begin
+ if file_pattern !== nothing
+ desc["file_pattern"] = Base.String(file_pattern)
+ end
+ end
+ begin
+ if file_random_seed !== nothing
+ desc["file_random_seed"] = Base.Int(file_random_seed)
+ end
+ end
+ begin
+ if file_shuffle_shift_ratio !== nothing
+ desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio)
+ end
+ end
+ begin
+ if file_buffer_size !== nothing
+ desc["file_buffer_size"] = Base.Int(file_buffer_size)
+ end
+ end
+ begin
+ if file_parallelism !== nothing
+ desc["file_parallelism"] = Base.Int(file_parallelism)
+ end
+ end
+ begin
+ if batch_size !== nothing
+ desc["batch_size"] = Base.Int(batch_size)
+ end
+ end
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing)
+ if tf.in_eager_mode()
+ record_input_eager(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type)
+ else
+ record_input_graph(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type)
+ end
+ end
+ end
+end
+
"""
- is_inf(x)
+ queue_dequeue_up_to_v2(handle, n; timeout_ms=-1)
"""
-tf.@op function is_inf(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("IsInf")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "IsInf")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueDequeueUpToV2") do
+ desc = tf.NodeDescription("QueueDequeueUpToV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ n_ = convert(Tensor{Int32}, n_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueDequeueUpToV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ n_ = convert(tf.EagerTensor, n_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_dequeue_up_to_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ else
+ queue_dequeue_up_to_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ end
+ end
end
+end
-"""
- lrn(input; depth_radius=5, bias=nothing, alpha=nothing, beta=nothing)
+"""
+ retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; table_id=-1, table_name="")
+Retrieve embedding parameters for a single table.
"""
-tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LRN")
- input_ = convert(TensorFlow.Tensor{Float32}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if depth_radius !== nothing
- desc["depth_radius"] = Base.Int(depth_radius)
+begin
+ begin
+ function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
end
- if bias !== nothing
- desc["bias"] = Base.identity(bias)
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
end
- if alpha !== nothing
- desc["alpha"] = Base.identity(alpha)
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
end
- if beta !== nothing
- desc["beta"] = Base.identity(beta)
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
end
- end), name, "LRN")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
end
+end
-"""
- assign(ref, value; validate_shape=true, use_locking=true)
+"""
+ load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters, ms, mom, gradient_accumulators; table_id=-1, table_name="")
+Load embedding parameters for a single table.
"""
-tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Assign")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (ref_, value_) = tf.tf_promote(ref_, value_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, value_)
- if validate_shape !== nothing
- desc["validate_shape"] = Base.Bool(validate_shape)
+begin
+ begin
+ function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
end
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+ begin
+ ms_ = convert(Tensor{Float32}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Float32}, mom_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
end
- end), name, "Assign")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
end
+end
+
"""
- assign_add(ref, value; use_locking=false)
+ serialize_tensor(tensor)
"""
-tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AssignAdd")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (ref_, value_) = tf.tf_promote(ref_, value_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, value_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+begin
+ begin
+ function serialize_tensor_graph(tensor_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SerializeTensor") do
+ desc = tf.NodeDescription("SerializeTensor")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
end
- end), name, "AssignAdd")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function serialize_tensor_eager(tensor_; name=nothing)
+ desc = tf.EagerOp("SerializeTensor")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_tensor(tensor_; name=nothing)
+ if tf.in_eager_mode()
+ serialize_tensor_eager(tensor_; name=name)
+ else
+ serialize_tensor_graph(tensor_; name=name)
+ end
+ end
end
+end
+
"""
- assign_sub(ref, value; use_locking=false)
+ mul(x, y)
"""
-tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AssignSub")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (ref_, value_) = tf.tf_promote(ref_, value_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, value_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+begin
+ begin
+ function mul_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Mul") do
+ desc = tf.NodeDescription("Mul")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
end
- end), name, "AssignSub")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mul_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Mul")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mul, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mul(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ mul_eager(x_, y_; name=name)
+ else
+ mul_graph(x_, y_; name=name)
+ end
+ end
end
+end
+
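Every _eager function ends the same way: it wraps the inputs and result in a tf.TapeNode and registers it with tf.add_node, which is the hook the new tape machinery uses to replay computations for gradients. A minimal eager sketch (hedged: the gradient-side API lives in tape.jl, and Ops-qualified access to the generated wrapper is assumed):

    using TensorFlow
    tf = TensorFlow
    tf.enable_eager_execution()
    x = constant(3.0)
    y = constant(4.0)
    z = tf.Ops.mul(x, y)    # runs Mul immediately and records a TapeNode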
"""
- scatter_update(ref, indices, updates; use_locking=true)
+ softmax_cross_entropy_with_logits(features, labels)
"""
-tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScatterUpdate")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- updates_ = convert(TensorFlow.Tensor{Any}, updates_)
- (ref_, updates_) = tf.tf_promote(ref_, updates_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, updates_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+begin
+ begin
+ function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do
+ desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ labels_ = convert(Tensor{Any}, labels_)
+ begin
+ end
+ end
+ begin
+ (features_, labels_) = tf.tf_promote(features_, labels_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
end
- end), name, "ScatterUpdate")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, labels_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing)
+ desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits")
+ features_ = convert(tf.EagerTensor, features_)
+ labels_ = convert(tf.EagerTensor, labels_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, labels_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ begin
+ desc["T"] = tf.data_type(labels_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing)
+ if tf.in_eager_mode()
+ softmax_cross_entropy_with_logits_eager(features_, labels_; name=name)
+ else
+ softmax_cross_entropy_with_logits_graph(features_, labels_; name=name)
+ end
+ end
+ end
+end
+
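`SoftmaxCrossEntropyWithLogits` is a two-output op, which is why the graph wrapper collects `out_idx = 1:2` into a `tf.Tensor[]` and the eager wrapper returns the whole `res` rather than `res[1]`: output 1 is the per-example loss, output 2 is the gradient with respect to the logits. A sketch of how callers would unpack it, with an assumed batch of 4 over 10 classes:

    logits = constant(randn(Float32, 4, 10))
    labels = constant(fill(0.1f0, (4, 10)))   # soft labels; each row sums to 1
    loss, backprop = tf.Ops.softmax_cross_entropy_with_logits(logits, labels)
    # loss: length-4 vector; backprop: 4x10 gradient w.r.t. the logits
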
"""
- scatter_sub(ref, indices, updates; use_locking=false)
+ resource_scatter_div(resource, indices, updates)
"""
-tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScatterSub")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- updates_ = convert(TensorFlow.Tensor{Any}, updates_)
- (ref_, updates_) = tf.tf_promote(ref_, updates_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, ref_)
+begin
+ begin
+ function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterDiv") do
+ desc = tf.NodeDescription("ResourceScatterDiv")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterDiv")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
tf.add_input(desc, indices_)
+ end
+ begin
tf.add_input(desc, updates_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
end
- end), name, "ScatterSub")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_div_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_div_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
end
+end
+
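One detail worth flagging in review: the graph wrapper converts the caller's 1-based Julia indices to TensorFlow's 0-based convention via `indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)`, but `resource_scatter_div_eager` above performs no such adjustment, so the two modes appear to disagree on indexing. Under the graph-mode convention, a call would look like this (with `v` a hypothetical resource handle to a variable holding [10.0, 20.0, 30.0]):

    tf.Ops.resource_scatter_div(v, [1, 3], [2.0, 10.0])
    # rows 1 and 3 (1-based) are divided in place -> [5.0, 20.0, 3.0]
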
"""
- scatter_add(ref, indices, updates; use_locking=false)
+ fixed_length_record_dataset_v2(filenames, header_bytes, record_bytes, footer_bytes, buffer_size, compression_type)
"""
-tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScatterAdd")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- updates_ = convert(TensorFlow.Tensor{Any}, updates_)
- (ref_, updates_) = tf.tf_promote(ref_, updates_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, updates_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+begin
+ begin
+ function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FixedLengthRecordDatasetV2") do
+ desc = tf.NodeDescription("FixedLengthRecordDatasetV2")
+ begin
+ begin
+ filenames_ = convert(Tensor{String}, filenames_)
+ begin
+ end
+ end
+ begin
+ header_bytes_ = convert(Tensor{Int64}, header_bytes_)
+ begin
+ end
+ end
+ begin
+ record_bytes_ = convert(Tensor{Int64}, record_bytes_)
+ begin
+ end
+ end
+ begin
+ footer_bytes_ = convert(Tensor{Int64}, footer_bytes_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
end
- end), name, "ScatterAdd")
- tf.Tensor(tf.Operation(desc))
+ begin
+ compression_type_ = convert(Tensor{String}, compression_type_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, header_bytes_)
+ end
+ begin
+ tf.add_input(desc, record_bytes_)
+ end
+ begin
+ tf.add_input(desc, footer_bytes_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
+ begin
+ function fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing)
+ desc = tf.EagerOp("FixedLengthRecordDatasetV2")
+ filenames_ = convert(tf.EagerTensor, filenames_)
+ header_bytes_ = convert(tf.EagerTensor, header_bytes_)
+ record_bytes_ = convert(tf.EagerTensor, record_bytes_)
+ footer_bytes_ = convert(tf.EagerTensor, footer_bytes_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ compression_type_ = convert(tf.EagerTensor, compression_type_)
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, header_bytes_)
+ end
+ begin
+ tf.add_input(desc, record_bytes_)
+ end
+ begin
+ tf.add_input(desc, footer_bytes_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing)
+ if tf.in_eager_mode()
+ fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name)
+ else
+ fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name)
+ end
+ end
+ end
+end
+
"""
- scatter_mul(ref, indices, updates; use_locking=false)
+ skip_dataset(input_dataset, count)
"""
-tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScatterMul")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- updates_ = convert(TensorFlow.Tensor{Any}, updates_)
- (ref_, updates_) = tf.tf_promote(ref_, updates_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, updates_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+begin
+ begin
+ function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "SkipDataset") do
+ desc = tf.NodeDescription("SkipDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ count_ = convert(Tensor{Int64}, count_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
end
- end), name, "ScatterMul")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("SkipDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ count_ = convert(tf.EagerTensor, count_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ skip_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ skip_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
end
+end
+
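Optional attributes follow a uniform shape in both wrappers: each keyword defaults to `nothing` and is only written into the description when the caller supplies it, so unspecified attributes fall back to the kernel's defaults. A hedged sketch, with `input_ds` a previously built dataset handle and the attribute values hypothetical:

    ds = tf.Ops.skip_dataset(input_ds, 5;            # drop the first 5 elements
                             output_types=[Float32], # written only because non-nothing
                             output_shapes=[Int[]])  # scalar elements (hypothetical encoding)
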
"""
- scatter_div(ref, indices, updates; use_locking=false)
+ cosh(x)
"""
-tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScatterDiv")
- ref_ = convert(TensorFlow.Tensor{Any}, ref_)
- indices_ = convert(TensorFlow.Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- updates_ = convert(TensorFlow.Tensor{Any}, updates_)
- (ref_, updates_) = tf.tf_promote(ref_, updates_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, ref_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, updates_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
+begin
+ begin
+ function cosh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Cosh") do
+ desc = tf.NodeDescription("Cosh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
end
- end), name, "ScatterDiv")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cosh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Cosh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cosh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cosh(x_; name=nothing)
+ if tf.in_eager_mode()
+ cosh_eager(x_; name=name)
+ else
+ cosh_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
- merge_summary(inputs)
+ fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true)
"""
-tf.@op function merge_summary(inputs_; name=nothing, N=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MergeSummary")
- inputs_ = [convert(TensorFlow.Tensor{String}, x) for x = inputs_]
- tf.add_input(desc, inputs_)
- if N !== nothing
- desc["N"] = Base.Int(N)
+begin
+ begin
+ function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ local desc
+ tf.with_op_name(name, "FusedBatchNormV2") do
+ desc = tf.NodeDescription("FusedBatchNormV2")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ scale_ = convert(Tensor{Any}, scale_)
+ begin
+ end
+ end
+ begin
+ offset_ = convert(Tensor{Any}, offset_)
+ begin
+ end
+ end
+ begin
+ mean_ = convert(Tensor{Any}, mean_)
+ begin
+ end
+ end
+ begin
+ variance_ = convert(Tensor{Any}, variance_)
+ begin
+ end
+ end
+ begin
+ (scale_, offset_, mean_, variance_) = tf.tf_promote(scale_, offset_, mean_, variance_)
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, offset_)
+ end
+ begin
+ tf.add_input(desc, mean_)
+ end
+ begin
+ tf.add_input(desc, variance_)
+ end
+ end
+ begin
+ begin
+ if U !== nothing
+ desc["U"] = Base.identity(U)
+ end
+ end
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
end
- end), name, "MergeSummary")
- tf.Tensor(tf.Operation(desc))
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ desc = tf.EagerOp("FusedBatchNormV2")
+ x_ = convert(tf.EagerTensor, x_)
+ scale_ = convert(tf.EagerTensor, scale_)
+ offset_ = convert(tf.EagerTensor, offset_)
+ mean_ = convert(tf.EagerTensor, mean_)
+ variance_ = convert(tf.EagerTensor, variance_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, offset_)
+ end
+ begin
+ tf.add_input(desc, mean_)
+ end
+ begin
+ tf.add_input(desc, variance_)
+ end
+ end
+ begin
+ begin
+ if U !== nothing
+ desc["U"] = Base.identity(U)
+ end
+ end
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["U"] = tf.data_type(scale_)
+ end
+ begin
+ desc["U"] = tf.data_type(offset_)
+ end
+ begin
+ desc["U"] = tf.data_type(mean_)
+ end
+ begin
+ desc["U"] = tf.data_type(variance_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ if tf.in_eager_mode()
+ fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ else
+ fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ end
+ end
+ end
+end
+
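`FusedBatchNormV2` illustrates two more generator features: multi-output ops (the graph wrapper pulls out all five outputs: the normalized result, batch mean, batch variance, and two reserved statistics tensors) and a second type attribute `U` for the scale/offset/mean/variance inputs, distinct from `T` for `x`. A training-mode sketch, where mean and variance are passed empty and computed by the kernel, as TensorFlow specifies for this op; NHWC layout assumed:

    x      = constant(randn(Float32, 2, 4, 4, 3))
    scale  = constant(ones(Float32, 3))
    offset = constant(zeros(Float32, 3))
    empty  = constant(Float32[])
    y, batch_mean, batch_var, _, _ = tf.Ops.fused_batch_norm_v2(
        x, scale, offset, empty, empty; epsilon=0.001, is_training=true)
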
"""
- scalar_summary(tags, values)
+ tensor_array_split(handle, value, lengths, flow_in)
"""
-tf.@op function scalar_summary(tags_, values_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ScalarSummary")
- tags_ = convert(TensorFlow.Tensor{String}, tags_)
- values_ = convert(TensorFlow.Tensor{Any}, values_)
- (values_,) = tf.tf_promote(values_)
- tf.add_input(desc, tags_)
- tf.add_input(desc, values_)
- end), name, "ScalarSummary")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArraySplit") do
+ desc = tf.NodeDescription("TensorArraySplit")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ lengths_ = convert(Tensor{Int64}, lengths_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArraySplit")
+ handle_ = convert(tf.EagerTensor, handle_)
+ value_ = convert(tf.EagerTensor, value_)
+ lengths_ = convert(tf.EagerTensor, lengths_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=name)
+ else
+ tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
"""
- audio_summary_v2(tag, tensor, sample_rate; max_outputs=3)
+ ctc_loss(inputs, labels_indices, labels_values, sequence_length; preprocess_collapse_repeated=false, ctc_merge_repeated=true, ignore_longer_outputs_than_inputs=false)
"""
-tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("AudioSummaryV2")
- tag_ = convert(TensorFlow.Tensor{String}, tag_)
- tensor_ = convert(TensorFlow.Tensor{Float32}, tensor_)
- sample_rate_ = convert(TensorFlow.Tensor{Float32}, sample_rate_)
- tf.add_input(desc, tag_)
- tf.add_input(desc, tensor_)
- tf.add_input(desc, sample_rate_)
- if max_outputs !== nothing
- desc["max_outputs"] = Base.Int(max_outputs)
+begin
+ begin
+ function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
+ local desc
+ tf.with_op_name(name, "CTCLoss") do
+ desc = tf.NodeDescription("CTCLoss")
+ begin
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ labels_indices_ = convert(Tensor{Int64}, labels_indices_)
+ begin
+ end
+ end
+ begin
+ labels_values_ = convert(Tensor{Int32}, labels_values_)
+ begin
+ end
+ end
+ begin
+ sequence_length_ = convert(Tensor{Int32}, sequence_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, labels_indices_)
end
- end), name, "AudioSummaryV2")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, labels_values_)
+ end
+ begin
+ tf.add_input(desc, sequence_length_)
+ end
+ end
+ begin
+ begin
+ if preprocess_collapse_repeated !== nothing
+ desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated)
+ end
+ end
+ begin
+ if ctc_merge_repeated !== nothing
+ desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated)
+ end
+ end
+ begin
+ if ignore_longer_outputs_than_inputs !== nothing
+ desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
+ desc = tf.EagerOp("CTCLoss")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ labels_indices_ = convert(tf.EagerTensor, labels_indices_)
+ labels_values_ = convert(tf.EagerTensor, labels_values_)
+ sequence_length_ = convert(tf.EagerTensor, sequence_length_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, labels_indices_)
+ end
+ begin
+ tf.add_input(desc, labels_values_)
+ end
+ begin
+ tf.add_input(desc, sequence_length_)
+ end
+ end
+ begin
+ begin
+ if preprocess_collapse_repeated !== nothing
+ desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated)
+ end
+ end
+ begin
+ if ctc_merge_repeated !== nothing
+ desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated)
+ end
+ end
+ begin
+ if ignore_longer_outputs_than_inputs !== nothing
+ desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
+ if tf.in_eager_mode()
+ ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
+ else
+ ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
+ end
+ end
+ end
+end
+
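`CTCLoss` has the least self-describing signature in this hunk, so the expected encodings are worth spelling out: `inputs` are time-major logits, and the labels arrive as a sparse (indices, values) pair. Shapes as TensorFlow defines them:

    # inputs:          Float32 [max_time, batch_size, num_classes]  (time-major logits)
    # labels_indices:  Int64   [num_entries, 2]  (batch, time) position of each label value
    # labels_values:   Int32   [num_entries]
    # sequence_length: Int32   [batch_size]
    loss, gradient = tf.Ops.ctc_loss(inputs, labels_indices, labels_values, sequence_length)
    # loss: [batch_size]; gradient: same shape as inputs
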
"""
- histogram_summary(tag, values)
+ quantized_reshape(tensor, shape, input_min, input_max)
"""
-tf.@op function histogram_summary(tag_, values_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("HistogramSummary")
- tag_ = convert(TensorFlow.Tensor{String}, tag_)
- values_ = convert(TensorFlow.Tensor{Float32}, values_)
- (values_,) = tf.tf_promote(values_)
- tf.add_input(desc, tag_)
- tf.add_input(desc, values_)
- end), name, "HistogramSummary")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedReshape") do
+ desc = tf.NodeDescription("QuantizedReshape")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ begin
+ input_min_ = convert(Tensor{Float32}, input_min_)
+ begin
+ end
+ end
+ begin
+ input_max_ = convert(Tensor{Float32}, input_max_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing)
+ desc = tf.EagerOp("QuantizedReshape")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ input_min_ = convert(tf.EagerTensor, input_min_)
+ input_max_ = convert(tf.EagerTensor, input_max_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["Tshape"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing)
+ if tf.in_eager_mode()
+ quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=name)
+ else
+ quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=name)
+ end
+ end
end
+end
+
"""
- image_summary(tag, tensor; max_images=3, bad_color=?)
+ floor_div(x, y)
"""
-tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ImageSummary")
- tag_ = convert(TensorFlow.Tensor{String}, tag_)
- tensor_ = convert(TensorFlow.Tensor{Float32}, tensor_)
- (tensor_,) = tf.tf_promote(tensor_)
- tf.add_input(desc, tag_)
- tf.add_input(desc, tensor_)
- if max_images !== nothing
- desc["max_images"] = Base.Int(max_images)
+begin
+ begin
+ function floor_div_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FloorDiv") do
+ desc = tf.NodeDescription("FloorDiv")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
end
- if bad_color !== nothing
- desc["bad_color"] = TensorFlow.RawTensor(bad_color)
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
end
- end), name, "ImageSummary")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function floor_div_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("FloorDiv")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(floor_div, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_div(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ floor_div_eager(x_, y_; name=name)
+ else
+ floor_div_graph(x_, y_; name=name)
+ end
+ end
end
+end
+
"""
- decode_png(contents; channels=0, dtype=UInt8)
+ tensor_array_v2(size; element_shape=?, dynamic_size=false, clear_after_read=true, tensor_array_name=)
"""
-tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("DecodePng")
- contents_ = convert(TensorFlow.Tensor{String}, contents_)
- tf.add_input(desc, contents_)
- if channels !== nothing
- desc["channels"] = Base.Int(channels)
+begin
+ begin
+ function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayV2") do
+ desc = tf.NodeDescription("TensorArrayV2")
+ begin
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ begin
+ if dynamic_size !== nothing
+ desc["dynamic_size"] = Base.Bool(dynamic_size)
+ end
+ end
+ begin
+ if clear_after_read !== nothing
+ desc["clear_after_read"] = Base.Bool(clear_after_read)
+ end
+ end
+ begin
+ if tensor_array_name !== nothing
+ desc["tensor_array_name"] = Base.String(tensor_array_name)
+ end
end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
+ desc = tf.EagerOp("TensorArrayV2")
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
if dtype !== nothing
desc["dtype"] = Base.identity(dtype)
end
- end), name, "DecodePng")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ begin
+ if dynamic_size !== nothing
+ desc["dynamic_size"] = Base.Bool(dynamic_size)
+ end
+ end
+ begin
+ if clear_after_read !== nothing
+ desc["clear_after_read"] = Base.Bool(clear_after_read)
+ end
+ end
+ begin
+ if tensor_array_name !== nothing
+ desc["tensor_array_name"] = Base.String(tensor_array_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- where(input)
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_v2_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name)
+ else
+ tensor_array_v2_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name)
+ end
+ end
+ end
+end
+
"""
-tf.@op function where(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Where")
- input_ = convert(TensorFlow.Tensor{Bool}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- end), name, "Where")
- tf.Tensor(tf.Operation(desc))
- end
+ barrier_close(handle; cancel_pending_enqueues=false)
+
"""
- const_()
-
-
-"""
-tf.@op function const_(; name=nothing, value=nothing, dtype=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Const")
- if value !== nothing
- desc["value"] = TensorFlow.RawTensor(value)
+begin
+ begin
+ function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ local desc
+ tf.with_op_name(name, "BarrierClose") do
+ desc = tf.NodeDescription("BarrierClose")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
end
- if dtype !== nothing
- desc["dtype"] = Base.identity(dtype)
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
end
- end), name, "Const")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ if cancel_pending_enqueues !== nothing
+ desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ desc = tf.EagerOp("BarrierClose")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if cancel_pending_enqueues !== nothing
+ desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ if tf.in_eager_mode()
+ barrier_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+ else
+ barrier_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+ end
+ end
+ end
+end
+
"""
- variable_v2(; container=, shared_name=)
+ read_variable_op(resource)
"""
-tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("VariableV2")
- if shape !== nothing
- desc["shape"] = Base.identity(shape)
+begin
+ begin
+ function read_variable_op_graph(resource_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ReadVariableOp") do
+ desc = tf.NodeDescription("ReadVariableOp")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
end
- if dtype !== nothing
- desc["dtype"] = Base.identity(dtype)
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
end
- if container !== nothing
- desc["container"] = Base.String(container)
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
end
- if shared_name !== nothing
- desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function read_variable_op_eager(resource_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ReadVariableOp")
+ resource_ = convert(tf.EagerTensor, resource_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
end
- end), name, "VariableV2")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- log(x)
-
-
-"""
-tf.@op function log(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Log")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Log")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_variable_op(resource_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ read_variable_op_eager(resource_; name=name, dtype=dtype)
+ else
+ read_variable_op_graph(resource_; name=name, dtype=dtype)
+ end
+ end
end
+end
+
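`ReadVariableOp` takes an untyped resource handle, which is why `dtype` is threaded through as an attribute rather than inferred from the input: at the wrapper level the handle alone does not carry the element type. A sketch, with `handle` assumed to point at a `Float32` resource variable:

    value = tf.Ops.read_variable_op(handle; dtype=Float32)   # dense Float32 snapshot of the variable
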
"""
- exp(x)
+ quantized_mul(x, y, min_x, max_x, min_y, max_y)
"""
-tf.@op function exp(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Exp")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
+begin
+ begin
+ function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedMul") do
+ desc = tf.NodeDescription("QuantizedMul")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ min_x_ = convert(Tensor{Float32}, min_x_)
+ begin
+ end
+ end
+ begin
+ max_x_ = convert(Tensor{Float32}, max_x_)
+ begin
+ end
+ end
+ begin
+ min_y_ = convert(Tensor{Float32}, min_y_)
+ begin
+ end
+ end
+ begin
+ max_y_ = convert(Tensor{Float32}, max_y_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ begin
+ (y_,) = tf.tf_promote(y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, min_x_)
+ end
+ begin
+ tf.add_input(desc, max_x_)
+ end
+ begin
+ tf.add_input(desc, min_y_)
+ end
+ begin
+ tf.add_input(desc, max_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+ desc = tf.EagerOp("QuantizedMul")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ min_x_ = convert(tf.EagerTensor, min_x_)
+ max_x_ = convert(tf.EagerTensor, max_x_)
+ min_y_ = convert(tf.EagerTensor, min_y_)
+ max_y_ = convert(tf.EagerTensor, max_y_)
+ begin
+ begin
tf.add_input(desc, x_)
- end), name, "Exp")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, min_x_)
+ end
+ begin
+ tf.add_input(desc, max_x_)
+ end
+ begin
+ tf.add_input(desc, min_y_)
+ end
+ begin
+ tf.add_input(desc, max_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T1"] = tf.data_type(x_)
+ end
+ begin
+ desc["T2"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+ if tf.in_eager_mode()
+ quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name)
+ else
+ quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name)
+ end
+ end
end
+end
+
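Quantized ops carry their value range alongside the data, so `QuantizedMul` takes the (min, max) range of each operand and returns three tensors: the quantized product plus the float range its values represent. A sketch with hypothetical quantized operands:

    z, z_min, z_max = tf.Ops.quantized_mul(x, y, min_x, max_x, min_y, max_y)
    # z: quantized product; (z_min, z_max): the real-number range z's encoding spans
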
"""
- ceil(x)
+ selu(features)
"""
-tf.@op function ceil(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Ceil")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Ceil")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function selu_graph(features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Selu") do
+ desc = tf.NodeDescription("Selu")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function selu_eager(features_; name=nothing)
+ desc = tf.EagerOp("Selu")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(selu, [features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu(features_; name=nothing)
+ if tf.in_eager_mode()
+ selu_eager(features_; name=name)
+ else
+ selu_graph(features_; name=name)
+ end
+ end
end
+end
+
"""
- floor(x)
+ lookup_table_insert(table_handle, keys, values)
"""
-tf.@op function floor(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Floor")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Floor")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableInsert") do
+ desc = tf.NodeDescription("LookupTableInsert")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("LookupTableInsert")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_insert_eager(table_handle_, keys_, values_; name=name)
+ else
+ lookup_table_insert_graph(table_handle_, keys_, values_; name=name)
+ end
+ end
+ end
+end
+
"""
- sqrt(x)
+ complex_abs(x)
"""
-tf.@op function sqrt(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Sqrt")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
+begin
+ begin
+ function complex_abs_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ComplexAbs") do
+ desc = tf.NodeDescription("ComplexAbs")
+ begin
+ begin
+ x_ = convert(Tensor{Complex{Float32}}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function complex_abs_eager(x_; name=nothing)
+ desc = tf.EagerOp("ComplexAbs")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
tf.add_input(desc, x_)
- end), name, "Sqrt")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(complex_abs, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex_abs(x_; name=nothing)
+ if tf.in_eager_mode()
+ complex_abs_eager(x_; name=name)
+ else
+ complex_abs_graph(x_; name=name)
+ end
+ end
+ end
+end
+
"""
abs(x)
"""
-tf.@op function abs(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Abs")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Abs")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function abs_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Abs") do
+ desc = tf.NodeDescription("Abs")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- cos(x)
-
-
-"""
-tf.@op function cos(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Cos")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
+ begin
+ function abs_eager(x_; name=nothing)
+ desc = tf.EagerOp("Abs")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
tf.add_input(desc, x_)
- end), name, "Cos")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(abs, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abs(x_; name=nothing)
+ if tf.in_eager_mode()
+ abs_eager(x_; name=name)
+ else
+ abs_graph(x_; name=name)
+ end
+ end
end
+end
+
"""
- sin(x)
+ lookup_table_import(table_handle, keys, values)
"""
-tf.@op function sin(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Sin")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Sin")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableImport") do
+ desc = tf.NodeDescription("LookupTableImport")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("LookupTableImport")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_import_eager(table_handle_, keys_, values_; name=name)
+ else
+ lookup_table_import_graph(table_handle_, keys_, values_; name=name)
+ end
+ end
+ end
+end
+
"""
- tan(x)
+ resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false, use_nesterov=false)
"""
-tf.@op function tan(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Tan")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Tan")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAdam") do
+ desc = tf.NodeDescription("ResourceApplyAdam")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ beta1_power_ = convert(Tensor{Any}, beta1_power_)
+ begin
+ end
+ end
+ begin
+ beta2_power_ = convert(Tensor{Any}, beta2_power_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ beta1_ = convert(Tensor{Any}, beta1_)
+ begin
+ end
+ end
+ begin
+ beta2_ = convert(Tensor{Any}, beta2_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, beta2_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ResourceApplyAdam")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ beta1_power_ = convert(tf.EagerTensor, beta1_power_)
+ beta2_power_ = convert(tf.EagerTensor, beta2_power_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ beta1_ = convert(tf.EagerTensor, beta1_)
+ beta2_ = convert(tf.EagerTensor, beta2_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, beta2_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
end
+end
+
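`ResourceApplyAdam`'s ten positional inputs make more sense against the update rule the kernel implements; per TensorFlow's documentation for this op (with `use_nesterov=false`):

    # lr_t  = lr * sqrt(1 - beta2_power) / (1 - beta1_power)
    # m    <- beta1 * m + (1 - beta1) * grad
    # v    <- beta2 * v + (1 - beta2) * grad.^2
    # var  <- var - lr_t * m / (sqrt(v) + epsilon)
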
"""
- atan(x)
+ write_histogram_summary(writer, step, tag, values)
"""
-tf.@op function atan(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Atan")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Atan")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WriteHistogramSummary") do
+ desc = tf.NodeDescription("WriteHistogramSummary")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Float32}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing)
+ desc = tf.EagerOp("WriteHistogramSummary")
+ writer_ = convert(tf.EagerTensor, writer_)
+ step_ = convert(tf.EagerTensor, step_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_histogram_summary(writer_, step_, tag_, values_; name=nothing)
+ if tf.in_eager_mode()
+ write_histogram_summary_eager(writer_, step_, tag_, values_; name=name)
+ else
+ write_histogram_summary_graph(writer_, step_, tag_, values_; name=name)
+ end
+ end
+ end
+end
+
-"""
- asin(x)
+"""
+ _host_send(tensor; client_terminated=false)
+Sends the named tensor from send_device to recv_device.
"""
-tf.@op function asin(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Asin")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Asin")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ local desc
+ tf.with_op_name(name, "_HostSend") do
+ desc = tf.NodeDescription("_HostSend")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ desc = tf.EagerOp("_HostSend")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ if tf.in_eager_mode()
+ _host_send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ else
+ _host_send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ end
+ end
end
+end
+
"""
- acos(x)
+ experimental_indexed_dataset_materialize(dataset, materialized)
"""
-tf.@op function acos(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Acos")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Acos")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do
+ desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize")
+ begin
+ begin
+ dataset_ = convert(Tensor{Any}, dataset_)
+ begin
+ end
+ end
+ begin
+ materialized_ = convert(Tensor{Any}, materialized_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ begin
+ tf.add_input(desc, materialized_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing)
+ desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize")
+ dataset_ = convert(tf.EagerTensor, dataset_)
+ materialized_ = convert(tf.EagerTensor, materialized_)
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ begin
+ tf.add_input(desc, materialized_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name)
+ else
+ experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name)
+ end
+ end
end
+end
+
"""
- tanh(x)
+ greater(x, y)
"""
-tf.@op function tanh(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Tanh")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
+begin
+ begin
+ function greater_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Greater") do
+ desc = tf.NodeDescription("Greater")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function greater_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Greater")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
tf.add_input(desc, x_)
- end), name, "Tanh")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(greater, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ greater_eager(x_, y_; name=name)
+ else
+ greater_graph(x_, y_; name=name)
+ end
+ end
end
+end
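+
+# Usage sketch (illustrative): element-wise x > y, yielding a Bool tensor.
+# Assumes eager mode via `enable_eager_execution()`:
+#
+#     x = constant([1, 5, 3])
+#     y = constant([3, 3, 3])
+#     greater(x, y)   # => [false, true, false]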
+
"""
- lgamma(x)
+ nccl_broadcast(input)
"""
-tf.@op function lgamma(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Lgamma")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Lgamma")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function nccl_broadcast_graph(input_; name=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "NcclBroadcast") do
+ desc = tf.NodeDescription("NcclBroadcast")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function nccl_broadcast_eager(input_; name=nothing, shape=nothing)
+ desc = tf.EagerOp("NcclBroadcast")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_broadcast(input_; name=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ nccl_broadcast_eager(input_; name=name, shape=shape)
+ else
+ nccl_broadcast_graph(input_; name=name, shape=shape)
+ end
+ end
end
+end
+
"""
- erf(x)
+ tensor_list_push_back_batch(input_handles, tensor)
"""
-tf.@op function erf(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Erf")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Erf")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListPushBackBatch") do
+ desc = tf.NodeDescription("TensorListPushBackBatch")
+ begin
+ begin
+ input_handles_ = convert(Tensor{Any}, input_handles_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handles_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListPushBackBatch")
+ input_handles_ = convert(tf.EagerTensor, input_handles_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, input_handles_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ begin
+ desc["element_dtype"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_push_back_batch_eager(input_handles_, tensor_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_push_back_batch_graph(input_handles_, tensor_; name=name, element_dtype=element_dtype)
+ end
+ end
end
+end
+
"""
- erfc(x)
+ resource_scatter_min(resource, indices, updates)
"""
-tf.@op function erfc(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Erfc")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Erfc")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterMin") do
+ desc = tf.NodeDescription("ResourceScatterMin")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_min_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterMin")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_min_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_min_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
end
+end
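+
+# Usage sketch (illustrative): element-wise `min` of sparse `updates` into the
+# variable behind `resource`. Note that only the graph wrapper above shifts
+# `indices` down by one, so graph callers pass 1-based Julia indices.
+# `var_handle` is a hypothetical resource handle of matching dtype:
+#
+#     resource_scatter_min(var_handle, constant([1, 3]), constant([0.5, 2.0]))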
+
"""
- real(input)
+ slice(input, begin, size)
"""
-tf.@op function real(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Real")
- input_ = convert(TensorFlow.Tensor{Complex{Float32}}, input_)
- (input_,) = tf.tf_promote(input_)
+begin
+ begin
+ function slice_graph(input_, begin_, size_; name=nothing, Index=nothing)
+ local desc
+ tf.with_op_name(name, "Slice") do
+ desc = tf.NodeDescription("Slice")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ begin_ = convert(Tensor{Any}, begin_)
+ begin
+ begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
+ end
+ end
+ begin
+ size_ = convert(Tensor{Any}, size_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (begin_, size_) = tf.tf_promote(begin_, size_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function slice_eager(input_, begin_, size_; name=nothing, Index=nothing)
+ desc = tf.EagerOp("Slice")
+ input_ = convert(tf.EagerTensor, input_)
+ begin_ = convert(tf.EagerTensor, begin_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
tf.add_input(desc, input_)
- end), name, "Real")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Index"] = tf.data_type(begin_)
+ end
+ begin
+ desc["Index"] = tf.data_type(size_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing)
+ if tf.in_eager_mode()
+ slice_eager(input_, begin_, size_; name=name, Index=Index)
+ else
+ slice_graph(input_, begin_, size_; name=name, Index=Index)
+ end
+ end
end
+end
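+
+# Usage sketch (illustrative): extract a `size`-shaped sub-tensor. As with
+# `resource_scatter_min`, the graph wrapper subtracts one from `begin`, so it
+# accepts 1-based Julia coordinates:
+#
+#     x = constant([1 2 3; 4 5 6])
+#     slice(x, constant([1, 2]), constant([2, 2]))   # => [2 3; 5 6]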
+
"""
- imag(input)
+    unicode_decode(input; errors="replace", replacement_char=65533, replace_control_characters=false)
"""
-tf.@op function imag(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Imag")
- input_ = convert(TensorFlow.Tensor{Complex{Float32}}, input_)
- (input_,) = tf.tf_promote(input_)
+begin
+ begin
+ function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ local desc
+ tf.with_op_name(name, "UnicodeDecode") do
+ desc = tf.NodeDescription("UnicodeDecode")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if input_encoding !== nothing
+ desc["input_encoding"] = Base.String(input_encoding)
+ end
+ end
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ begin
+ if replace_control_characters !== nothing
+ desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ desc = tf.EagerOp("UnicodeDecode")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
tf.add_input(desc, input_)
- end), name, "Imag")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ begin
+ if input_encoding !== nothing
+ desc["input_encoding"] = Base.String(input_encoding)
+ end
+ end
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ begin
+ if replace_control_characters !== nothing
+ desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ if tf.in_eager_mode()
+ unicode_decode_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ else
+ unicode_decode_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ end
+ end
+ end
+end
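+
+# Usage sketch (illustrative): decode each string into Unicode code points.
+# The op has two outputs (row splits plus a flat vector of code points), which
+# is why the graph wrapper above collects `out_idx = 1:2`:
+#
+#     splits, chars = unicode_decode(constant(["héllo"]); input_encoding="UTF-8")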
+
+
+"""
+ take_dataset(input_dataset, count)
+
+
+"""
+begin
+ begin
+ function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "TakeDataset") do
+ desc = tf.NodeDescription("TakeDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ count_ = convert(Tensor{Int64}, count_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("TakeDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ count_ = convert(tf.EagerTensor, count_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ take_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ take_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
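+
+# Usage sketch (illustrative): build a dataset holding at most the first five
+# elements of `ds` (a hypothetical dataset variant tensor); `output_types` and
+# `output_shapes` must describe the element structure of `ds`:
+#
+#     taken = take_dataset(ds, constant(Int64(5));
+#                          output_types=..., output_shapes=...)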
+
+
+"""
+ boosted_trees_make_stats_summary(node_ids, gradients, hessians, bucketized_features_list)
+
+
+"""
+begin
+ begin
+ function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do
+ desc = tf.NodeDescription("BoostedTreesMakeStatsSummary")
+ begin
+ begin
+ node_ids_ = convert(Tensor{Int32}, node_ids_)
+ begin
+ end
+ end
+ begin
+ gradients_ = convert(Tensor{Float32}, gradients_)
+ begin
+ end
+ end
+ begin
+ hessians_ = convert(Tensor{Float32}, hessians_)
+ begin
+ end
+ end
+ begin
+ bucketized_features_list_ = [convert(Tensor{Int32}, x) for x = bucketized_features_list_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, node_ids_)
+ end
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, hessians_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_list_)
+ end
+ end
+ begin
+ begin
+ if max_splits !== nothing
+ desc["max_splits"] = Base.Int(max_splits)
+ end
+ end
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesMakeStatsSummary")
+ node_ids_ = convert(tf.EagerTensor, node_ids_)
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ hessians_ = convert(tf.EagerTensor, hessians_)
+ bucketized_features_list_ = convert(tf.EagerTensor, bucketized_features_list_)
+ begin
+ begin
+ tf.add_input(desc, node_ids_)
+ end
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, hessians_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_list_)
+ end
+ end
+ begin
+ begin
+ if max_splits !== nothing
+ desc["max_splits"] = Base.Int(max_splits)
+ end
+ end
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features)
+ else
+ boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features)
+ end
+ end
+ end
+end
+
+
+"""
+ all_candidate_sampler(true_classes; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "AllCandidateSampler") do
+ desc = tf.NodeDescription("AllCandidateSampler")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("AllCandidateSampler")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ all_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2)
+ else
+ all_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+    conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, data_format="NHWC", dilations=[1, 1, 1, 1])
+
+
+"""
+begin
+ begin
+ function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv2DBackpropInput") do
+ desc = tf.NodeDescription("Conv2DBackpropInput")
+ begin
+ begin
+ input_sizes_ = convert(Tensor{Int32}, input_sizes_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_sizes_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if use_cudnn_on_gpu !== nothing
+ desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv2DBackpropInput")
+ input_sizes_ = convert(tf.EagerTensor, input_sizes_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_sizes_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if use_cudnn_on_gpu !== nothing
+ desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
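+
+# Usage sketch (illustrative): gradient of a 2-D convolution with respect to
+# its input. `filt` and `dy` (the incoming gradient) are hypothetical tensors;
+# `input_sizes` is the NHWC shape of the original input:
+#
+#     dx = conv2d_backprop_input(constant(Int32[1, 4, 4, 1]), filt, dy;
+#                                strides=[1, 1, 1, 1], padding="SAME")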
+
+
+"""
+ dataset_to_single_element(dataset)
+
+
+"""
+begin
+ begin
+ function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "DatasetToSingleElement") do
+ desc = tf.NodeDescription("DatasetToSingleElement")
+ begin
+ begin
+ dataset_ = convert(Tensor{Any}, dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("DatasetToSingleElement")
+ dataset_ = convert(tf.EagerTensor, dataset_)
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ dataset_to_single_element_eager(dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ dataset_to_single_element_graph(dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
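+
+# Usage sketch (illustrative): unwrap a dataset known to hold exactly one
+# element; `ds`, `output_types`, and `output_shapes` are placeholders as in
+# `take_dataset` above:
+#
+#     only_element = dataset_to_single_element(ds;
+#                                              output_types=..., output_shapes=...)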
+
+
+"""
+ cache_dataset(input_dataset, filename)
+
+
+"""
+begin
+ begin
+ function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "CacheDataset") do
+ desc = tf.NodeDescription("CacheDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cache_dataset_eager(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("CacheDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ filename_ = convert(tf.EagerTensor, filename_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ cache_dataset_eager(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ cache_dataset_graph(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
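+
+# Usage sketch (illustrative): cache the elements of `ds` to disk after the
+# first pass over the data (an empty filename caches in memory instead):
+#
+#     cached = cache_dataset(ds, constant("/tmp/my_cache");
+#                            output_types=..., output_shapes=...)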
+
+
+"""
+ fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max; num_bits=8, narrow_range=false)
+
+
+"""
+begin
+ begin
+ function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do
+ desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient")
+ begin
+ begin
+ gradients_ = convert(Tensor{Float32}, gradients_)
+ begin
+ end
+ end
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ min_ = convert(Tensor{Float32}, min_)
+ begin
+ end
+ end
+ begin
+ max_ = convert(Tensor{Float32}, max_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ min_ = convert(tf.EagerTensor, min_)
+ max_ = convert(tf.EagerTensor, max_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ if tf.in_eager_mode()
+ fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ else
+ fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ end
+ end
+ end
+end
+
+
+"""
+ fused_resize_and_pad_conv2d(input, size, paddings, filter; resize_align_corners=false)
+
+
+"""
+begin
+ begin
+ function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "FusedResizeAndPadConv2D") do
+ desc = tf.NodeDescription("FusedResizeAndPadConv2D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_) = tf.tf_promote(input_, filter_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if resize_align_corners !== nothing
+ desc["resize_align_corners"] = Base.Bool(resize_align_corners)
+ end
+ end
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("FusedResizeAndPadConv2D")
+ input_ = convert(tf.EagerTensor, input_)
+ size_ = convert(tf.EagerTensor, size_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if resize_align_corners !== nothing
+ desc["resize_align_corners"] = Base.Bool(resize_align_corners)
+ end
+ end
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding)
+ else
+ fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+    batch(in_tensors; max_enqueued_batches=10, allowed_batch_sizes=Int64[], container="", shared_name="", batching_queue="")
+
+
+"""
+begin
+ begin
+ function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
+ local desc
+ tf.with_op_name(name, "Batch") do
+ desc = tf.NodeDescription("Batch")
+ begin
+ begin
+ in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, in_tensors_)
+ end
+ end
+ begin
+ begin
+ if num_batch_threads !== nothing
+ desc["num_batch_threads"] = Base.Int(num_batch_threads)
+ end
+ end
+ begin
+ if max_batch_size !== nothing
+ desc["max_batch_size"] = Base.Int(max_batch_size)
+ end
+ end
+ begin
+ if max_enqueued_batches !== nothing
+ desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches)
+ end
+ end
+ begin
+ if batch_timeout_micros !== nothing
+ desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros)
+ end
+ end
+ begin
+ if allowed_batch_sizes !== nothing
+ desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes)
+ end
+ end
+ begin
+ if grad_timeout_micros !== nothing
+ desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if batching_queue !== nothing
+ desc["batching_queue"] = Base.String(batching_queue)
+ end
+ end
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
+ desc = tf.EagerOp("Batch")
+ in_tensors_ = convert(tf.EagerTensor, in_tensors_)
+ begin
+ begin
+ tf.add_input(desc, in_tensors_)
+ end
+ end
+ begin
+ begin
+ if num_batch_threads !== nothing
+ desc["num_batch_threads"] = Base.Int(num_batch_threads)
+ end
+ end
+ begin
+ if max_batch_size !== nothing
+ desc["max_batch_size"] = Base.Int(max_batch_size)
+ end
+ end
+ begin
+ if max_enqueued_batches !== nothing
+ desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches)
+ end
+ end
+ begin
+ if batch_timeout_micros !== nothing
+ desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros)
+ end
+ end
+ begin
+ if allowed_batch_sizes !== nothing
+ desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes)
+ end
+ end
+ begin
+ if grad_timeout_micros !== nothing
+ desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if batching_queue !== nothing
+ desc["batching_queue"] = Base.String(batching_queue)
+ end
+ end
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
+ if tf.in_eager_mode()
+ batch_eager(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T)
+ else
+ batch_graph(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T)
+ end
+ end
+ end
+end
+
+
+"""
+ collective_bcast_recv()
+
+
+"""
+begin
+ begin
+ function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "CollectiveBcastRecv") do
+ desc = tf.NodeDescription("CollectiveBcastRecv")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if group_size !== nothing
+ desc["group_size"] = Base.Int(group_size)
+ end
+ end
+ begin
+ if group_key !== nothing
+ desc["group_key"] = Base.Int(group_key)
+ end
+ end
+ begin
+ if instance_key !== nothing
+ desc["instance_key"] = Base.Int(instance_key)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function collective_bcast_recv_eager(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+ desc = tf.EagerOp("CollectiveBcastRecv")
+ begin
+ end
+ begin
+ begin
+ if group_size !== nothing
+ desc["group_size"] = Base.Int(group_size)
+ end
+ end
+ begin
+ if group_key !== nothing
+ desc["group_key"] = Base.Int(group_key)
+ end
+ end
+ begin
+ if instance_key !== nothing
+ desc["instance_key"] = Base.Int(instance_key)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ collective_bcast_recv_eager(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
+ else
+ collective_bcast_recv_graph(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_to_space_nd(input, block_shape, crops)
+
+
+"""
+begin
+ begin
+ function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchToSpaceND") do
+ desc = tf.NodeDescription("BatchToSpaceND")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ block_shape_ = convert(Tensor{Int32}, block_shape_)
+ begin
+ end
+ end
+ begin
+ crops_ = convert(Tensor{Int32}, crops_)
+ begin
+ end
+ end
+ begin
+ (crops_,) = tf.tf_promote(crops_)
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (block_shape_,) = tf.tf_promote(block_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, block_shape_)
+ end
+ begin
+ tf.add_input(desc, crops_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing)
+ desc = tf.EagerOp("BatchToSpaceND")
+ input_ = convert(tf.EagerTensor, input_)
+ block_shape_ = convert(tf.EagerTensor, block_shape_)
+ crops_ = convert(tf.EagerTensor, crops_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, block_shape_)
+ end
+ begin
+ tf.add_input(desc, crops_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tblock_shape"] = tf.data_type(block_shape_)
+ end
+ begin
+ desc["Tcrops"] = tf.data_type(crops_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space_nd(input_, block_shape_, crops_; name=nothing)
+ if tf.in_eager_mode()
+ batch_to_space_nd_eager(input_, block_shape_, crops_; name=name)
+ else
+ batch_to_space_nd_graph(input_, block_shape_, crops_; name=name)
+ end
+ end
+ end
+end
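+
+# Usage sketch (illustrative): inverse of SpaceToBatchND; folds the batch
+# dimension back into spatial blocks. E.g. a hypothetical (4, 1, 1, 1) tensor
+# `x` with block_shape [2, 2] and zero crops becomes (1, 2, 2, 1):
+#
+#     batch_to_space_nd(x, constant(Int32[2, 2]), constant(Int32[0 0; 0 0]))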
+
+
+"""
+ loop_cond(input)
+
+
+"""
+begin
+ begin
+ function loop_cond_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LoopCond") do
+ desc = tf.NodeDescription("LoopCond")
+ begin
+ begin
+ input_ = convert(Tensor{Bool}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function loop_cond_eager(input_; name=nothing)
+ desc = tf.EagerOp("LoopCond")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(loop_cond, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function loop_cond(input_; name=nothing)
+ if tf.in_eager_mode()
+ loop_cond_eager(input_; name=name)
+ else
+ loop_cond_graph(input_; name=name)
+ end
+ end
+ end
+end
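+
+# Usage sketch (illustrative): `loop_cond` just forwards a Bool tensor while
+# marking it as the termination condition of a graph while-loop; it is normally
+# emitted by the while-loop machinery rather than called directly. `counter`
+# is a hypothetical integer tensor:
+#
+#     cond = loop_cond(greater(counter, constant(0)))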
+
+
+"""
+    depth_to_space(input; data_format="NHWC")
+
+
+"""
+begin
+ begin
+ function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "DepthToSpace") do
+ desc = tf.NodeDescription("DepthToSpace")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing)
+ desc = tf.EagerOp("DepthToSpace")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ depth_to_space_eager(input_; name=name, block_size=block_size, data_format=data_format)
+ else
+ depth_to_space_graph(input_; name=name, block_size=block_size, data_format=data_format)
+ end
+ end
+ end
+end
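+
+# Usage sketch (illustrative): move channel data into spatial blocks. With
+# block_size=2, a hypothetical NHWC tensor `x` of shape (1, 1, 1, 4) becomes
+# (1, 2, 2, 1):
+#
+#     depth_to_space(x; block_size=2)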
+
+
+"""
+ destroy_temporary_variable(ref)
+
+
+"""
+begin
+ begin
+ function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing)
+ local desc
+ tf.with_op_name(name, "DestroyTemporaryVariable") do
+ desc = tf.NodeDescription("DestroyTemporaryVariable")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ (ref_,) = tf.tf_promote(ref_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ end
+ begin
+ begin
+ if var_name !== nothing
+ desc["var_name"] = Base.String(var_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing)
+ desc = tf.EagerOp("DestroyTemporaryVariable")
+ ref_ = convert(tf.EagerTensor, ref_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ end
+ begin
+ begin
+ if var_name !== nothing
+ desc["var_name"] = Base.String(var_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_temporary_variable(ref_; name=nothing, var_name=nothing)
+ if tf.in_eager_mode()
+ destroy_temporary_variable_eager(ref_; name=name, var_name=var_name)
+ else
+ destroy_temporary_variable_graph(ref_; name=name, var_name=var_name)
+ end
+ end
+ end
+end
+
+
+"""
+    cudnn_rnn(input, input_h, input_c, params; rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, is_training=true)
+
+
+"""
+begin
+ begin
+ function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNN") do
+ desc = tf.NodeDescription("CudnnRNN")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_h_ = convert(Tensor{Any}, input_h_)
+ begin
+ end
+ end
+ begin
+ input_c_ = convert(Tensor{Any}, input_c_)
+ begin
+ end
+ end
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+ desc = tf.EagerOp("CudnnRNN")
+ input_ = convert(tf.EagerTensor, input_)
+ input_h_ = convert(tf.EagerTensor, input_h_)
+ input_c_ = convert(tf.EagerTensor, input_c_)
+ params_ = convert(tf.EagerTensor, params_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_h_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_c_)
+ end
+ begin
+ desc["T"] = tf.data_type(params_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training)
+ else
+ cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training)
+ end
+ end
+ end
+end
+
+
+"""
+ ref_identity(input)
+
+Return the same ref tensor as the input ref tensor.
+"""
+begin
+ begin
+ function ref_identity_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RefIdentity") do
+ desc = tf.NodeDescription("RefIdentity")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ref_identity_eager(input_; name=nothing)
+ desc = tf.EagerOp("RefIdentity")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_identity, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_identity(input_; name=nothing)
+ if tf.in_eager_mode()
+ ref_identity_eager(input_; name=name)
+ else
+ ref_identity_graph(input_; name=name)
+ end
+ end
+ end
+end
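+
+# Usage sketch (illustrative comment only, assuming graph mode and a ref-typed
+# input such as a Variable):
+#   v = Variable(zeros(Float32, 3))
+#   w = Ops.ref_identity(v)  # same ref tensor as `v`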
+
+
+"""
+ max_pool3d_grad(orig_input, orig_output, grad; data_format=)
+
+Computes gradients of the 3-D max pooling function.
+"""
+begin
+ begin
+ function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPool3DGrad") do
+ desc = tf.NodeDescription("MaxPool3DGrad")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Float32}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Float32}, orig_output_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Float32}, grad_)
+ begin
+ end
+ end
+ begin
+ (grad_,) = tf.tf_promote(grad_)
+ end
+ begin
+ (orig_input_, orig_output_) = tf.tf_promote(orig_input_, orig_output_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPool3DGrad")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["TInput"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["TInput"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
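+
+# Usage sketch (comment only; 3-D pooling takes length-5 ksize/strides in
+# NDHWC layout — all argument values here are hypothetical):
+#   g = Ops.max_pool3d_grad(orig_input, orig_output, grad;
+#                           ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
+#                           padding="VALID")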
+
+
+"""
+ load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters, momenta, gradient_accumulators; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ momenta_ = convert(Tensor{Float32}, momenta_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ momenta_ = convert(tf.EagerTensor, momenta_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
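+
+# Usage sketch (comment only, hypothetical attr values): the op loads the
+# parameters as a side effect and returns an operation to run:
+#   op = Ops.load_tpu_embedding_momentum_parameters_grad_accum_debug(
+#       parameters, momenta, gradient_accumulators;
+#       num_shards=1, shard_id=0, table_name="table0")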
+
+
+"""
+ conv3d_backprop_input(input, filter, out_backprop; dilations=[1, 1, 1, 1, 1])
+
+Computes the gradients of 3-D convolution with respect to the input.
+"""
+begin
+ begin
+ function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv3DBackpropInput") do
+ desc = tf.NodeDescription("Conv3DBackpropInput")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv3DBackpropInput")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations)
+ else
+ conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations)
+ end
+ end
+ end
+end
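+
+# Usage sketch (comment only; `x`, `w`, `dy` are hypothetical tensors for the
+# original input, the filter, and the incoming gradient):
+#   dx = Ops.conv3d_backprop_input(x, w, dy;
+#                                  strides=[1, 1, 1, 1, 1], padding="SAME")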
+
+
+"""
+ padding_fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, shared_name=)
+
+A queue that produces elements in first-in first-out order, with padding for variable-size elements.
+"""
+begin
+ begin
+ function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "PaddingFIFOQueueV2") do
+ desc = tf.NodeDescription("PaddingFIFOQueueV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function padding_fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("PaddingFIFOQueueV2")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
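+
+# Usage sketch (comment only, hypothetical attrs): creates a handle to a
+# padding FIFO queue shared between graph runs:
+#   q = Ops.padding_fifo_queue_v2(component_types=[Float32], capacity=10)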
+
+
+"""
+ ref_exit(data)
+
+Exits the current frame to its parent frame.
+"""
+begin
+ begin
+ function ref_exit_graph(data_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RefExit") do
+ desc = tf.NodeDescription("RefExit")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ref_exit_eager(data_; name=nothing)
+ desc = tf.EagerOp("RefExit")
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_exit, [data_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_exit(data_; name=nothing)
+ if tf.in_eager_mode()
+ ref_exit_eager(data_; name=name)
+ else
+ ref_exit_graph(data_; name=name)
+ end
+ end
+ end
+end
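+
+# Note (comment sketch): RefExit is a control-flow op that forwards a ref
+# tensor from a child frame (e.g. a while-loop body) to its parent frame; it
+# is normally emitted by the control-flow builders rather than called directly:
+#   out = Ops.ref_exit(data)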
+
+
+"""
+ map_clear(; capacity=0, memory_limit=0, container=, shared_name=)
+
+Removes all elements in the underlying container.
+"""
+begin
+ begin
+ function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapClear") do
+ desc = tf.NodeDescription("MapClear")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapClear")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
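+
+# Usage sketch (comment only; `sess` and the shared_name value are
+# hypothetical): clears a staging map as a side effect:
+#   run(sess, Ops.map_clear(dtypes=[Float32], shared_name="stage"))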
+
+
+"""
+ encode_wav(audio, sample_rate)
+
+Encode audio data using the WAV file format.
+"""
+begin
+ begin
+ function encode_wav_graph(audio_, sample_rate_; name=nothing)
+ local desc
+ tf.with_op_name(name, "EncodeWav") do
+ desc = tf.NodeDescription("EncodeWav")
+ begin
+ begin
+ audio_ = convert(Tensor{Float32}, audio_)
+ begin
+ end
+ end
+ begin
+ sample_rate_ = convert(Tensor{Int32}, sample_rate_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, audio_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function encode_wav_eager(audio_, sample_rate_; name=nothing)
+ desc = tf.EagerOp("EncodeWav")
+ audio_ = convert(tf.EagerTensor, audio_)
+ sample_rate_ = convert(tf.EagerTensor, sample_rate_)
+ begin
+ begin
+ tf.add_input(desc, audio_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(encode_wav, [audio_, sample_rate_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_wav(audio_, sample_rate_; name=nothing)
+ if tf.in_eager_mode()
+ encode_wav_eager(audio_, sample_rate_; name=name)
+ else
+ encode_wav_graph(audio_, sample_rate_; name=name)
+ end
+ end
+ end
+end
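+
+# Usage sketch (comment only): `audio` is a Float32 frames × channels tensor
+# with samples in [-1, 1], `sample_rate` an Int32 scalar; the result is a
+# scalar string tensor holding the WAV bytes:
+#   wav = Ops.encode_wav(constant(rand(Float32, 16000, 1)), constant(Int32(16000)))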
+
+
+"""
+ tensor_summary_v2(tag, tensor, serialized_summary_metadata)
+
+Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
+"""
+begin
+ begin
+ function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorSummaryV2") do
+ desc = tf.NodeDescription("TensorSummaryV2")
+ begin
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ serialized_summary_metadata_ = convert(Tensor{String}, serialized_summary_metadata_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, serialized_summary_metadata_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing)
+ desc = tf.EagerOp("TensorSummaryV2")
+ tag_ = convert(tf.EagerTensor, tag_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ serialized_summary_metadata_ = convert(tf.EagerTensor, serialized_summary_metadata_)
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, serialized_summary_metadata_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=name)
+ else
+ tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=name)
+ end
+ end
+ end
+end
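+
+# Usage sketch (comment only; `loss` is a hypothetical tensor and the metadata
+# argument is a serialized SummaryMetadata proto, which may be empty):
+#   s = Ops.tensor_summary_v2(constant("loss"), loss, constant(""))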
+
+
+"""
+ queue_dequeue_up_to(handle, n; timeout_ms=-1)
+
+Dequeues up to `n` tuples of one or more tensors from the given queue.
+"""
+begin
+ begin
+ function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueDequeueUpTo") do
+ desc = tf.NodeDescription("QueueDequeueUpTo")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ n_ = convert(Tensor{Int32}, n_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueDequeueUpTo")
+ handle_ = convert(tf.EagerTensor, handle_)
+ n_ = convert(tf.EagerTensor, n_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_dequeue_up_to_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ else
+ queue_dequeue_up_to_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
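+
+# Usage sketch (comment only): like a dequeue-many, but may return fewer than
+# `n` elements when the queue is closed (`handle` is hypothetical):
+#   vals = Ops.queue_dequeue_up_to(handle, constant(Int32(32));
+#                                  component_types=[Float32])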
+
+
+"""
+ matrix_band_part(input, num_lower, num_upper)
+
+Copies a tensor, setting everything outside a central band in each innermost matrix to zero.
+"""
+begin
+ begin
+ function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixBandPart") do
+ desc = tf.NodeDescription("MatrixBandPart")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ num_lower_ = convert(Tensor{Int64}, num_lower_)
+ begin
+ end
+ end
+ begin
+ num_upper_ = convert(Tensor{Int64}, num_upper_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (num_lower_, num_upper_) = tf.tf_promote(num_lower_, num_upper_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, num_lower_)
+ end
+ begin
+ tf.add_input(desc, num_upper_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing)
+ desc = tf.EagerOp("MatrixBandPart")
+ input_ = convert(tf.EagerTensor, input_)
+ num_lower_ = convert(tf.EagerTensor, num_lower_)
+ num_upper_ = convert(tf.EagerTensor, num_upper_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, num_lower_)
+ end
+ begin
+ tf.add_input(desc, num_upper_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tindex"] = tf.data_type(num_lower_)
+ end
+ begin
+ desc["Tindex"] = tf.data_type(num_upper_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_band_part(input_, num_lower_, num_upper_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_band_part_eager(input_, num_lower_, num_upper_; name=name)
+ else
+ matrix_band_part_graph(input_, num_lower_, num_upper_; name=name)
+ end
+ end
+ end
+end
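+
+# Usage sketch (comment only): keeps a band of each matrix; negative bounds
+# keep the full triangle. E.g. the tridiagonal part of a 4×4 matrix:
+#   band = Ops.matrix_band_part(constant(rand(4, 4)), constant(1), constant(1))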
+
+
+"""
+ copy(input; tensor_name=, debug_ops_spec=Int64[])
+
+Copy Op.
+"""
+begin
+ begin
+ function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+ local desc
+ tf.with_op_name(name, "Copy") do
+ desc = tf.NodeDescription("Copy")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_ops_spec !== nothing
+ desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+ desc = tf.EagerOp("Copy")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_ops_spec !== nothing
+ desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+ if tf.in_eager_mode()
+ copy_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec)
+ else
+ copy_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec)
+ end
+ end
+ end
+end
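+
+# Note (comment sketch): Copy performs a CPU-to-CPU or GPU-to-GPU deep copy of
+# the input tensor and is primarily used by the TensorFlow debugger; the
+# tensor_name value below is hypothetical:
+#   y = Ops.copy(x; tensor_name="x:0")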
+
+
+"""
+ shape_n(input; out_type=Int32)
+
+Returns shape of tensors.
+"""
+begin
+ begin
+ function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "ShapeN") do
+ desc = tf.NodeDescription("ShapeN")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:N
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing)
+ desc = tf.EagerOp("ShapeN")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape_n(input_; name=nothing, N=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ shape_n_eager(input_; name=name, N=N, out_type=out_type)
+ else
+ shape_n_graph(input_; name=name, N=N, out_type=out_type)
+ end
+ end
+ end
+end
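+
+# Usage sketch (comment only): returns one shape tensor per input; `N` must
+# match the number of inputs (`a` and `b` are hypothetical tensors):
+#   shapes = Ops.shape_n([a, b]; N=2)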
+
+
+"""
+ experimental_parse_example_dataset(input_dataset, num_parallel_calls, dense_defaults; sloppy=false)
+
+Transforms `input_dataset` containing `Example` protos into a dataset of parsed `Tensor` or `SparseTensor` objects.
+"""
+begin
+ begin
+ function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalParseExampleDataset") do
+ desc = tf.NodeDescription("ExperimentalParseExampleDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_)
+ begin
+ end
+ end
+ begin
+ dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ begin
+ tf.add_input(desc, dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if sparse_keys !== nothing
+ desc["sparse_keys"] = map(Base.identity, sparse_keys)
+ end
+ end
+ begin
+ if dense_keys !== nothing
+ desc["dense_keys"] = map(Base.identity, dense_keys)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if Tdense !== nothing
+ desc["Tdense"] = map(Base.identity, Tdense)
+ end
+ end
+ begin
+ if dense_shapes !== nothing
+ desc["dense_shapes"] = map(Base.identity, dense_shapes)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if sloppy !== nothing
+ desc["sloppy"] = Base.Bool(sloppy)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+ desc = tf.EagerOp("ExperimentalParseExampleDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_)
+ dense_defaults_ = convert(tf.EagerTensor, dense_defaults_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ begin
+ tf.add_input(desc, dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if sparse_keys !== nothing
+ desc["sparse_keys"] = map(Base.identity, sparse_keys)
+ end
+ end
+ begin
+ if dense_keys !== nothing
+ desc["dense_keys"] = map(Base.identity, dense_keys)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if Tdense !== nothing
+ desc["Tdense"] = map(Base.identity, Tdense)
+ end
+ end
+ begin
+ if dense_shapes !== nothing
+ desc["dense_shapes"] = map(Base.identity, dense_shapes)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if sloppy !== nothing
+ desc["sloppy"] = Base.Bool(sloppy)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+ if tf.in_eager_mode()
+ experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy)
+ else
+ experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy)
+ end
+ end
+ end
+end
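+
+# Note (comment sketch): a dataset transformation that parses serialized
+# `Example` protos in parallel; the sparse/dense key, type, and shape attrs
+# mirror parse_example, and the op is normally wired up by higher-level
+# dataset helpers rather than called directly.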
+
+
+"""
+ concat(concat_dim, values)
+
+Concatenates tensors along one dimension.
+"""
+begin
+ begin
+ function concat_graph(concat_dim_, values_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "Concat") do
+ desc = tf.NodeDescription("Concat")
+ begin
+ begin
+ concat_dim_ = convert(Tensor{Int32}, concat_dim_)
+ begin
+ end
+ end
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, concat_dim_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function concat_eager(concat_dim_, values_; name=nothing, N=nothing)
+ desc = tf.EagerOp("Concat")
+ concat_dim_ = convert(tf.EagerTensor, concat_dim_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, concat_dim_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat(concat_dim_, values_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ concat_eager(concat_dim_, values_; name=name, N=N)
+ else
+ concat_graph(concat_dim_, values_; name=name, N=N)
+ end
+ end
+ end
+end
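+
+# Usage sketch (comment only): this binds the raw Concat op, whose concat_dim
+# is 0-based (no index shift appears in the graph wrapper above), unlike the
+# 1-based hand-written TensorFlow.jl wrappers:
+#   c = Ops.concat(constant(Int32(0)), [constant([1, 2]), constant([3, 4])]; N=2)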
+
+
+"""
+ data_format_dim_map(x; src_format=, dst_format=)
+
+Returns the dimension index in the destination data format given the one in the source data format.
+"""
+begin
+ begin
+ function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing)
+ local desc
+ tf.with_op_name(name, "DataFormatDimMap") do
+ desc = tf.NodeDescription("DataFormatDimMap")
+ begin
+ begin
+ x_ = convert(Tensor{Int32}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if src_format !== nothing
+ desc["src_format"] = Base.String(src_format)
+ end
+ end
+ begin
+ if dst_format !== nothing
+ desc["dst_format"] = Base.String(dst_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function data_format_dim_map_eager(x_; name=nothing, src_format=nothing, dst_format=nothing)
+ desc = tf.EagerOp("DataFormatDimMap")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if src_format !== nothing
+ desc["src_format"] = Base.String(src_format)
+ end
+ end
+ begin
+ if dst_format !== nothing
+ desc["dst_format"] = Base.String(dst_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing)
+ if tf.in_eager_mode()
+ data_format_dim_map_eager(x_; name=name, src_format=src_format, dst_format=dst_format)
+ else
+ data_format_dim_map_graph(x_; name=name, src_format=src_format, dst_format=dst_format)
+ end
+ end
+ end
+end
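+
+# Usage sketch (comment only): maps a 0-based dimension index between layouts,
+# e.g. the NHWC channel axis (3) maps to 1 in NCHW:
+#   d = Ops.data_format_dim_map(constant(Int32(3));
+#                               src_format="NHWC", dst_format="NCHW")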
+
+
+"""
+ identity_reader(; container=, shared_name=)
+
+A Reader that outputs the queued work as both the key and value.
+"""
+begin
+ begin
+ function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "IdentityReader") do
+ desc = tf.NodeDescription("IdentityReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function identity_reader_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("IdentityReader")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ identity_reader_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ identity_reader_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
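+
+# Usage sketch (comment only, hypothetical shared_name): creates a reader whose
+# output value equals the queued work item:
+#   reader = Ops.identity_reader(shared_name="ident")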
+
+
+"""
+ softplus(features)
+
+Computes softplus: `log(exp(features) + 1)`.
+"""
+begin
+ begin
+ function softplus_graph(features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Softplus") do
+ desc = tf.NodeDescription("Softplus")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function softplus_eager(features_; name=nothing)
+ desc = tf.EagerOp("Softplus")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(softplus, [features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus(features_; name=nothing)
+ if tf.in_eager_mode()
+ softplus_eager(features_; name=name)
+ else
+ softplus_graph(features_; name=name)
+ end
+ end
+ end
+end
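+
+# Usage sketch (comment only): elementwise softplus, log(exp(x) + 1):
+#   y = Ops.softplus(constant([-1.0, 0.0, 1.0]))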
+
+
+"""
+ resource_sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices; use_locking=false)
+
+Sparse update entries in `var` and `accum` according to the FOBOS (proximal adagrad) algorithm.
+"""
+begin
+ begin
+ function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do
+ desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
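+
+# Usage sketch (comment only): note the graph wrapper above shifts `indices`
+# by -1, so 1-based Julia indices can be passed; `var` and `accum` are
+# resource handles, the rest are hypothetical tensors:
+#   op = Ops.resource_sparse_apply_proximal_adagrad(var, accum, lr, l1, l2,
+#                                                   grad, indices)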
+
+
+"""
+ parse_single_sequence_example(serialized, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, context_dense_defaults, debug_name; Ncontext_sparse=0, Ncontext_dense=0, Nfeature_list_sparse=0, Nfeature_list_dense=0, context_sparse_types=Int64[], Tcontext_dense=Int64[], feature_list_dense_types=Int64[], context_dense_shapes=Int64[], feature_list_sparse_types=Int64[], feature_list_dense_shapes=Int64[])
+
+Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
+"""
+begin
+ begin
+ function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ParseSingleSequenceExample") do
+ desc = tf.NodeDescription("ParseSingleSequenceExample")
+ begin
+ begin
+ serialized_ = convert(Tensor{String}, serialized_)
+ begin
+ end
+ end
+ begin
+ feature_list_dense_missing_assumed_empty_ = convert(Tensor{String}, feature_list_dense_missing_assumed_empty_)
+ begin
+ end
+ end
+ begin
+ context_sparse_keys_ = [convert(Tensor{String}, x) for x = context_sparse_keys_]
+ begin
+ end
+ end
+ begin
+ context_dense_keys_ = [convert(Tensor{String}, x) for x = context_dense_keys_]
+ begin
+ end
+ end
+ begin
+ feature_list_sparse_keys_ = [convert(Tensor{String}, x) for x = feature_list_sparse_keys_]
+ begin
+ end
+ end
+ begin
+ feature_list_dense_keys_ = [convert(Tensor{String}, x) for x = feature_list_dense_keys_]
+ begin
+ end
+ end
+ begin
+ context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_]
+ begin
+ end
+ end
+ begin
+ debug_name_ = convert(Tensor{String}, debug_name_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, feature_list_dense_missing_assumed_empty_)
+ end
+ begin
+ tf.add_input(desc, context_sparse_keys_)
+ end
+ begin
+ tf.add_input(desc, context_dense_keys_)
+ end
+ begin
+ tf.add_input(desc, feature_list_sparse_keys_)
+ end
+ begin
+ tf.add_input(desc, feature_list_dense_keys_)
+ end
+ begin
+ tf.add_input(desc, context_dense_defaults_)
+ end
+ begin
+ tf.add_input(desc, debug_name_)
+ end
+ end
+ begin
+ begin
+ if Ncontext_sparse !== nothing
+ desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse)
+ end
+ end
+ begin
+ if Ncontext_dense !== nothing
+ desc["Ncontext_dense"] = Base.Int(Ncontext_dense)
+ end
+ end
+ begin
+ if Nfeature_list_sparse !== nothing
+ desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse)
+ end
+ end
+ begin
+ if Nfeature_list_dense !== nothing
+ desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense)
+ end
+ end
+ begin
+ if context_sparse_types !== nothing
+ desc["context_sparse_types"] = map(Base.identity, context_sparse_types)
+ end
+ end
+ begin
+ if Tcontext_dense !== nothing
+ desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense)
+ end
+ end
+ begin
+ if feature_list_dense_types !== nothing
+ desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types)
+ end
+ end
+ begin
+ if context_dense_shapes !== nothing
+ desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes)
+ end
+ end
+ begin
+ if feature_list_sparse_types !== nothing
+ desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types)
+ end
+ end
+ begin
+ if feature_list_dense_shapes !== nothing
+ desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:8
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
+ desc = tf.EagerOp("ParseSingleSequenceExample")
+ serialized_ = convert(tf.EagerTensor, serialized_)
+ feature_list_dense_missing_assumed_empty_ = convert(tf.EagerTensor, feature_list_dense_missing_assumed_empty_)
+ context_sparse_keys_ = convert(tf.EagerTensor, context_sparse_keys_)
+ context_dense_keys_ = convert(tf.EagerTensor, context_dense_keys_)
+ feature_list_sparse_keys_ = convert(tf.EagerTensor, feature_list_sparse_keys_)
+ feature_list_dense_keys_ = convert(tf.EagerTensor, feature_list_dense_keys_)
+ context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_)
+ debug_name_ = convert(tf.EagerTensor, debug_name_)
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, feature_list_dense_missing_assumed_empty_)
+ end
+ begin
+ tf.add_input(desc, context_sparse_keys_)
+ end
+ begin
+ tf.add_input(desc, context_dense_keys_)
+ end
+ begin
+ tf.add_input(desc, feature_list_sparse_keys_)
+ end
+ begin
+ tf.add_input(desc, feature_list_dense_keys_)
+ end
+ begin
+ tf.add_input(desc, context_dense_defaults_)
+ end
+ begin
+ tf.add_input(desc, debug_name_)
+ end
+ end
+ begin
+ begin
+ if Ncontext_sparse !== nothing
+ desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse)
+ end
+ end
+ begin
+ if Ncontext_dense !== nothing
+ desc["Ncontext_dense"] = Base.Int(Ncontext_dense)
+ end
+ end
+ begin
+ if Nfeature_list_sparse !== nothing
+ desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse)
+ end
+ end
+ begin
+ if Nfeature_list_dense !== nothing
+ desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense)
+ end
+ end
+ begin
+ if context_sparse_types !== nothing
+ desc["context_sparse_types"] = map(Base.identity, context_sparse_types)
+ end
+ end
+ begin
+ if Tcontext_dense !== nothing
+ desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense)
+ end
+ end
+ begin
+ if feature_list_dense_types !== nothing
+ desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types)
+ end
+ end
+ begin
+ if context_dense_shapes !== nothing
+ desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes)
+ end
+ end
+ begin
+ if feature_list_sparse_types !== nothing
+ desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types)
+ end
+ end
+ begin
+ if feature_list_dense_shapes !== nothing
+ desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
+ if tf.in_eager_mode()
+ parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes)
+ else
+ parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes)
+ end
+ end
+ end
+end
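+
+# Note (comment sketch): parses one serialized SequenceExample into context and
+# feature-list tensors; the graph wrapper returns multiple outputs (see the
+# 1:8 output loop above), grouped according to the N*/type attrs.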
+
+
+"""
+ matrix_diag(diagonal)
+
+Returns a batched diagonal tensor with a given batched diagonal values.
+"""
+begin
+ begin
+ function matrix_diag_graph(diagonal_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixDiag") do
+ desc = tf.NodeDescription("MatrixDiag")
+ begin
+ begin
+ diagonal_ = convert(Tensor{Any}, diagonal_)
+ begin
+ end
+ end
+ begin
+ (diagonal_,) = tf.tf_promote(diagonal_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_diag_eager(diagonal_; name=nothing)
+ desc = tf.EagerOp("MatrixDiag")
+ diagonal_ = convert(tf.EagerTensor, diagonal_)
+ begin
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(diagonal_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag(diagonal_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_diag_eager(diagonal_; name=name)
+ else
+ matrix_diag_graph(diagonal_; name=name)
+ end
+ end
+ end
+end
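+
+# Usage sketch (comment only): builds a (batched) diagonal matrix from the
+# innermost axis of the input:
+#   d = Ops.matrix_diag(constant([1.0, 2.0, 3.0]))  # 3×3 diagonal matrix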
+
+
+"""
+ fact()
+
+Output a fact about factorials.
+"""
+begin
+ begin
+ function fact_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "Fact") do
+ desc = tf.NodeDescription("Fact")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fact_eager(; name=nothing)
+ desc = tf.EagerOp("Fact")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fact, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fact(; name=nothing)
+ if tf.in_eager_mode()
+ fact_eager(; name=name)
+ else
+ fact_graph(; name=name)
+ end
+ end
+ end
+end
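+
+# Usage sketch (comment only): the classic TensorFlow easter egg; returns a
+# string tensor containing a "fact" about factorials:
+#   f = Ops.fact()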
+
+
+"""
+ max_pool_grad_grad(orig_input, orig_output, grad; data_format=)
+
+Computes second-order gradients of the maxpooling function.
+"""
+begin
+ begin
+ function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolGradGrad") do
+ desc = tf.NodeDescription("MaxPoolGradGrad")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Any}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Any}, orig_output_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPoolGradGrad")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ resize_bilinear_grad(grads, original_image; align_corners=false)
+
+
+"""
+begin
+ begin
+ function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeBilinearGrad") do
+ desc = tf.NodeDescription("ResizeBilinearGrad")
+ begin
+ begin
+ grads_ = convert(Tensor{Float32}, grads_)
+ begin
+ end
+ end
+ begin
+ original_image_ = convert(Tensor{Any}, original_image_)
+ begin
+ end
+ end
+ begin
+ (original_image_,) = tf.tf_promote(original_image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, original_image_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resize_bilinear_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeBilinearGrad")
+ grads_ = convert(tf.EagerTensor, grads_)
+ original_image_ = convert(tf.EagerTensor, original_image_)
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, original_image_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(original_image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_bilinear_grad_eager(grads_, original_image_; name=name, align_corners=align_corners)
+ else
+ resize_bilinear_grad_graph(grads_, original_image_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_to_space(input, crops)
+
+
+"""
+begin
+ begin
+ function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing)
+ local desc
+ tf.with_op_name(name, "BatchToSpace") do
+ desc = tf.NodeDescription("BatchToSpace")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ crops_ = convert(Tensor{Int32}, crops_)
+ begin
+ crops_ = crops_ - convert(tf.Tensor{eltype(crops_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (crops_,) = tf.tf_promote(crops_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, crops_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing)
+ desc = tf.EagerOp("BatchToSpace")
+ input_ = convert(tf.EagerTensor, input_)
+ crops_ = convert(tf.EagerTensor, crops_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, crops_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(crops_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space(input_, crops_; name=nothing, block_size=nothing)
+ if tf.in_eager_mode()
+ batch_to_space_eager(input_, crops_; name=name, block_size=block_size)
+ else
+ batch_to_space_graph(input_, crops_; name=name, block_size=block_size)
+ end
+ end
+ end
+end
+
+
+"""
+ optional_from_value(components)
+
+
+"""
+begin
+ begin
+ function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing)
+ local desc
+ tf.with_op_name(name, "OptionalFromValue") do
+ desc = tf.NodeDescription("OptionalFromValue")
+ begin
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing)
+ desc = tf.EagerOp("OptionalFromValue")
+ components_ = convert(tf.EagerTensor, components_)
+ begin
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_from_value(components_; name=nothing, Toutput_types=nothing)
+ if tf.in_eager_mode()
+ optional_from_value_eager(components_; name=name, Toutput_types=Toutput_types)
+ else
+ optional_from_value_graph(components_; name=name, Toutput_types=Toutput_types)
+ end
+ end
+ end
+end
+
+
+"""
+ xlogy(x, y)
+
+
+"""
+begin
+ begin
+ function xlogy_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Xlogy") do
+ desc = tf.NodeDescription("Xlogy")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function xlogy_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Xlogy")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(xlogy, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xlogy(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ xlogy_eager(x_, y_; name=name)
+ else
+ xlogy_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
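+
+# Usage sketch (illustration; assumes eager mode, as above): `Xlogy` computes
+# the elementwise x * log(y), defined to be 0 wherever x == 0.
+#
+#     x = constant([0.0, 1.0, 2.0])
+#     y = constant([7.0, 1.0, 2.0])
+#     Ops.xlogy(x, y)  # [0.0, 0.0, 2 * log(2)]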
+
+
+"""
+ cross(a, b)
+
+
+"""
+begin
+ begin
+ function cross_graph(a_, b_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Cross") do
+ desc = tf.NodeDescription("Cross")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ b_ = convert(Tensor{Any}, b_)
+ begin
+ end
+ end
+ begin
+ (a_, b_) = tf.tf_promote(a_, b_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cross_eager(a_, b_; name=nothing)
+ desc = tf.EagerOp("Cross")
+ a_ = convert(tf.EagerTensor, a_)
+ b_ = convert(tf.EagerTensor, b_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cross, [a_, b_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross(a_, b_; name=nothing)
+ if tf.in_eager_mode()
+ cross_eager(a_, b_; name=name)
+ else
+ cross_graph(a_, b_; name=name)
+ end
+ end
+ end
+end
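+
+# Usage sketch (illustration; assumes eager mode): `Cross` computes the
+# pairwise cross product of 3-element vectors.
+#
+#     a = constant([1.0, 0.0, 0.0])
+#     b = constant([0.0, 1.0, 0.0])
+#     Ops.cross(a, b)  # a × b = [0.0, 0.0, 1.0]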
+
+
+"""
+ bitwise_and(x, y)
+
+
+"""
+begin
+ begin
+ function bitwise_and_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BitwiseAnd") do
+ desc = tf.NodeDescription("BitwiseAnd")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bitwise_and_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("BitwiseAnd")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_and(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ bitwise_and_eager(x_, y_; name=name)
+ else
+ bitwise_and_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
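+
+# Usage sketch (illustration; assumes eager mode): elementwise bitwise AND of
+# two integer tensors.
+#
+#     x = constant(Int32[12])  # 0b1100
+#     y = constant(Int32[10])  # 0b1010
+#     Ops.bitwise_and(x, y)    # [8], i.e. 0b1000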
+
+
+"""
+ broadcast_to(input, shape)
+
+
+"""
+begin
+ begin
+ function broadcast_to_graph(input_, shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BroadcastTo") do
+ desc = tf.NodeDescription("BroadcastTo")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function broadcast_to_eager(input_, shape_; name=nothing)
+ desc = tf.EagerOp("BroadcastTo")
+ input_ = convert(tf.EagerTensor, input_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_to(input_, shape_; name=nothing)
+ if tf.in_eager_mode()
+ broadcast_to_eager(input_, shape_; name=name)
+ else
+ broadcast_to_graph(input_, shape_; name=name)
+ end
+ end
+ end
+end
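+
+# Usage sketch (illustration; assumes eager mode): broadcast a tensor to a
+# compatible larger shape.
+#
+#     x = constant([1, 2, 3])
+#     Ops.broadcast_to(x, [2, 3])  # two stacked copies of x, shape (2, 3)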
+
+
+"""
+ elu_grad(gradients, outputs)
+
+
+"""
+begin
+ begin
+ function elu_grad_graph(gradients_, outputs_; name=nothing)
+ local desc
+ tf.with_op_name(name, "EluGrad") do
+ desc = tf.NodeDescription("EluGrad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Any}, gradients_)
+ begin
+ end
+ end
+ begin
+ outputs_ = convert(Tensor{Any}, outputs_)
+ begin
+ end
+ end
+ begin
+ (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, outputs_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function elu_grad_eager(gradients_, outputs_; name=nothing)
+ desc = tf.EagerOp("EluGrad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ outputs_ = convert(tf.EagerTensor, outputs_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, outputs_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(outputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu_grad(gradients_, outputs_; name=nothing)
+ if tf.in_eager_mode()
+ elu_grad_eager(gradients_, outputs_; name=name)
+ else
+ elu_grad_graph(gradients_, outputs_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNNBackprop") do
+ desc = tf.NodeDescription("CudnnRNNBackprop")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_h_ = convert(Tensor{Any}, input_h_)
+ begin
+ end
+ end
+ begin
+ input_c_ = convert(Tensor{Any}, input_c_)
+ begin
+ end
+ end
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ output_ = convert(Tensor{Any}, output_)
+ begin
+ end
+ end
+ begin
+ output_h_ = convert(Tensor{Any}, output_h_)
+ begin
+ end
+ end
+ begin
+ output_c_ = convert(Tensor{Any}, output_c_)
+ begin
+ end
+ end
+ begin
+ output_backprop_ = convert(Tensor{Any}, output_backprop_)
+ begin
+ end
+ end
+ begin
+ output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_)
+ begin
+ end
+ end
+ begin
+ output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_)
+ begin
+ end
+ end
+ begin
+ reserve_space_ = convert(Tensor{Any}, reserve_space_)
+ begin
+ end
+ end
+ begin
+ (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, output_)
+ end
+ begin
+ tf.add_input(desc, output_h_)
+ end
+ begin
+ tf.add_input(desc, output_c_)
+ end
+ begin
+ tf.add_input(desc, output_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_h_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_c_backprop_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("CudnnRNNBackprop")
+ input_ = convert(tf.EagerTensor, input_)
+ input_h_ = convert(tf.EagerTensor, input_h_)
+ input_c_ = convert(tf.EagerTensor, input_c_)
+ params_ = convert(tf.EagerTensor, params_)
+ output_ = convert(tf.EagerTensor, output_)
+ output_h_ = convert(tf.EagerTensor, output_h_)
+ output_c_ = convert(tf.EagerTensor, output_c_)
+ output_backprop_ = convert(tf.EagerTensor, output_backprop_)
+ output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_)
+ output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_)
+ reserve_space_ = convert(tf.EagerTensor, reserve_space_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, output_)
+ end
+ begin
+ tf.add_input(desc, output_h_)
+ end
+ begin
+ tf.add_input(desc, output_c_)
+ end
+ begin
+ tf.add_input(desc, output_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_h_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_c_backprop_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_h_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_c_)
+ end
+ begin
+ desc["T"] = tf.data_type(params_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_h_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_c_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_h_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_c_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(reserve_space_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ else
+ cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ string_to_hash_bucket_fast(input)
+
+
+"""
+begin
+ begin
+ function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing)
+ local desc
+ tf.with_op_name(name, "StringToHashBucketFast") do
+ desc = tf.NodeDescription("StringToHashBucketFast")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing)
+ desc = tf.EagerOp("StringToHashBucketFast")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, num_buckets=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing)
+ if tf.in_eager_mode()
+ string_to_hash_bucket_fast_eager(input_; name=name, num_buckets=num_buckets)
+ else
+ string_to_hash_bucket_fast_graph(input_; name=name, num_buckets=num_buckets)
+ end
+ end
+ end
+end
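+
+# Usage sketch (illustration; assumes eager mode): hash each string to a
+# bucket in 0:(num_buckets - 1). The hash is stable across processes but is
+# not cryptographically secure.
+#
+#     s = constant(["alice", "bob"])
+#     Ops.string_to_hash_bucket_fast(s, num_buckets=100)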
+
+
+"""
+ mutable_hash_table(; container=, shared_name=, use_node_name_sharing=false)
+
+
+"""
+begin
+ begin
+ function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "MutableHashTable") do
+ desc = tf.NodeDescription("MutableHashTable")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutable_hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ desc = tf.EagerOp("MutableHashTable")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ if tf.in_eager_mode()
+ mutable_hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ else
+ mutable_hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ relu(features)
+
+
+"""
+begin
+ begin
+ function relu_graph(features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Relu") do
+ desc = tf.NodeDescription("Relu")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function relu_eager(features_; name=nothing)
+ desc = tf.EagerOp("Relu")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(relu, [features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu(features_; name=nothing)
+ if tf.in_eager_mode()
+ relu_eager(features_; name=name)
+ else
+ relu_graph(features_; name=name)
+ end
+ end
+ end
+end
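+
+# Usage sketch (illustration; assumes eager mode): elementwise
+# max(features, 0).
+#
+#     x = constant([-1.0, 0.0, 2.0])
+#     Ops.relu(x)  # [0.0, 0.0, 2.0]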
+
+
+"""
+ nth_element(input, n; reverse=false)
+
+
+"""
+begin
+ begin
+ function nth_element_graph(input_, n_; name=nothing, reverse=nothing)
+ local desc
+ tf.with_op_name(name, "NthElement") do
+ desc = tf.NodeDescription("NthElement")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ n_ = convert(Tensor{Int32}, n_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if reverse !== nothing
+ desc["reverse"] = Base.Bool(reverse)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function nth_element_eager(input_, n_; name=nothing, reverse=nothing)
+ desc = tf.EagerOp("NthElement")
+ input_ = convert(tf.EagerTensor, input_)
+ n_ = convert(tf.EagerTensor, n_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if reverse !== nothing
+ desc["reverse"] = Base.Bool(reverse)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nth_element(input_, n_; name=nothing, reverse=nothing)
+ if tf.in_eager_mode()
+ nth_element_eager(input_, n_; name=name, reverse=reverse)
+ else
+ nth_element_graph(input_, n_; name=name, reverse=reverse)
+ end
+ end
+ end
+end
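+
+# Usage sketch (illustration; assumes eager mode): the n-th order statistic
+# along the last axis. Note that `n` is forwarded to the kernel unadjusted,
+# so it follows TensorFlow's 0-based convention: n = 0 selects the smallest
+# value, and reverse=true counts from the largest instead.
+#
+#     x = constant([5.0, 1.0, 4.0, 2.0])
+#     Ops.nth_element(x, 0)  # 1.0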
+
+
+"""
+ softsign(features)
+
+
+"""
+begin
+ begin
+ function softsign_graph(features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Softsign") do
+ desc = tf.NodeDescription("Softsign")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function softsign_eager(features_; name=nothing)
+ desc = tf.EagerOp("Softsign")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(softsign, [features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign(features_; name=nothing)
+ if tf.in_eager_mode()
+ softsign_eager(features_; name=name)
+ else
+ softsign_graph(features_; name=name)
+ end
+ end
+ end
+end
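+
+# Usage sketch (illustration; assumes eager mode): elementwise
+# features / (|features| + 1).
+#
+#     x = constant([-2.0, 0.0, 2.0])
+#     Ops.softsign(x)  # [-2/3, 0.0, 2/3]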
+
+
+"""
+ mutable_dense_hash_table(empty_key; container=, shared_name=, use_node_name_sharing=false, value_shape=?, initial_num_buckets=131072, max_load_factor=?)
+
+
+"""
+begin
+ begin
+ function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
+ local desc
+ tf.with_op_name(name, "MutableDenseHashTable") do
+ desc = tf.NodeDescription("MutableDenseHashTable")
+ begin
+ begin
+ empty_key_ = convert(Tensor{Any}, empty_key_)
+ begin
+ end
+ end
+ begin
+ (empty_key_,) = tf.tf_promote(empty_key_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, empty_key_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ begin
+ if initial_num_buckets !== nothing
+ desc["initial_num_buckets"] = Base.Int(initial_num_buckets)
+ end
+ end
+ begin
+ if max_load_factor !== nothing
+ desc["max_load_factor"] = Base.identity(max_load_factor)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
+ desc = tf.EagerOp("MutableDenseHashTable")
+ empty_key_ = convert(tf.EagerTensor, empty_key_)
+ begin
+ begin
+ tf.add_input(desc, empty_key_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ begin
+ if initial_num_buckets !== nothing
+ desc["initial_num_buckets"] = Base.Int(initial_num_buckets)
+ end
+ end
+ begin
+ if max_load_factor !== nothing
+ desc["max_load_factor"] = Base.identity(max_load_factor)
+ end
+ end
+ end
+ begin
+ desc["key_dtype"] = tf.data_type(empty_key_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
+ if tf.in_eager_mode()
+ mutable_dense_hash_table_eager(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor)
+ else
+ mutable_dense_hash_table_graph(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor)
+ end
+ end
+ end
+end
+
+
+"""
+ _shutdown_distributed_tpu()
+
+An op that shuts down a running distributed TPU system. The Op returns an
+error if no system is running.
+"""
+begin
+ begin
+ function _shutdown_distributed_tpu_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "_ShutdownDistributedTPU") do
+ desc = tf.NodeDescription("_ShutdownDistributedTPU")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _shutdown_distributed_tpu_eager(; name=nothing)
+ desc = tf.EagerOp("_ShutdownDistributedTPU")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _shutdown_distributed_tpu(; name=nothing)
+ if tf.in_eager_mode()
+ _shutdown_distributed_tpu_eager(; name=name)
+ else
+ _shutdown_distributed_tpu_graph(; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ polygamma(a, x)
+
+
+"""
+begin
+ begin
+ function polygamma_graph(a_, x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Polygamma") do
+ desc = tf.NodeDescription("Polygamma")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (a_, x_) = tf.tf_promote(a_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function polygamma_eager(a_, x_; name=nothing)
+ desc = tf.EagerOp("Polygamma")
+ a_ = convert(tf.EagerTensor, a_)
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(polygamma, [a_, x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function polygamma(a_, x_; name=nothing)
+ if tf.in_eager_mode()
+ polygamma_eager(a_, x_; name=name)
+ else
+ polygamma_graph(a_, x_; name=name)
+ end
+ end
+ end
+end
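+
+# Usage sketch (illustration; assumes eager mode): the polygamma function
+# ψ^(a)(x), the a-th derivative of the digamma function, evaluated
+# elementwise; a = 0 gives digamma itself.
+#
+#     a = constant([0.0])
+#     x = constant([2.0])
+#     Ops.polygamma(a, x)  # digamma(2) ≈ 0.4228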
+
+
+"""
+ nccl_reduce(input)
+
+
+"""
+begin
+ begin
+ function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing)
+ local desc
+ tf.with_op_name(name, "NcclReduce") do
+ desc = tf.NodeDescription("NcclReduce")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing)
+ desc = tf.EagerOp("NcclReduce")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing)
+ if tf.in_eager_mode()
+ nccl_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices)
+ else
+ nccl_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices)
+ end
+ end
+ end
+end
+
+
+"""
+ arg_max(input, dimension; output_type=Int64)
+
+
+"""
+begin
+ begin
+ function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing)
+ local desc
+ tf.with_op_name(name, "ArgMax") do
+ desc = tf.NodeDescription("ArgMax")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ dimension_ = convert(Tensor{Int32}, dimension_)
+ begin
+ dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (dimension_,) = tf.tf_promote(dimension_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, dimension_)
+ end
+ end
+ begin
+ begin
+ if output_type !== nothing
+ desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing)
+ desc = tf.EagerOp("ArgMax")
+ input_ = convert(tf.EagerTensor, input_)
+ dimension_ = convert(tf.EagerTensor, dimension_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, dimension_)
+ end
+ end
+ begin
+ begin
+ if output_type !== nothing
+ desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(dimension_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing)
+ if tf.in_eager_mode()
+ arg_max_eager(input_, dimension_; name=name, output_type=output_type)
+ else
+ arg_max_graph(input_, dimension_; name=name, output_type=output_type)
+ end
+ end
+ end
+end
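+
+# Usage sketch (illustration; assumes eager mode): the index of the largest
+# value along a dimension. The wrapper subtracts 1 from `dimension`, so it is
+# passed 1-based as usual in Julia; the returned indices, however, come back
+# from the kernel 0-based.
+#
+#     x = constant([1.0, 5.0, 3.0])
+#     Ops.arg_max(x, 1)  # 1, the 0-based position of 5.0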
+
+
+"""
+ matrix_set_diag(input, diagonal)
+
+
+"""
+begin
+ begin
+ function matrix_set_diag_graph(input_, diagonal_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixSetDiag") do
+ desc = tf.NodeDescription("MatrixSetDiag")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ diagonal_ = convert(Tensor{Any}, diagonal_)
+ begin
+ end
+ end
+ begin
+ (input_, diagonal_) = tf.tf_promote(input_, diagonal_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_set_diag_eager(input_, diagonal_; name=nothing)
+ desc = tf.EagerOp("MatrixSetDiag")
+ input_ = convert(tf.EagerTensor, input_)
+ diagonal_ = convert(tf.EagerTensor, diagonal_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(diagonal_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_set_diag(input_, diagonal_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_set_diag_eager(input_, diagonal_; name=name)
+ else
+ matrix_set_diag_graph(input_, diagonal_; name=name)
+ end
+ end
+ end
+end
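+
+# Usage sketch (illustration; assumes eager mode): replace the main diagonal
+# of a (batched) matrix with the given values.
+#
+#     m = constant(zeros(3, 3))
+#     d = constant([1.0, 2.0, 3.0])
+#     Ops.matrix_set_diag(m, d)  # a 3×3 matrix with d on the diagonal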
+
+
+"""
+ space_to_batch_nd(input, block_shape, paddings)
+
+
+"""
+begin
+ begin
+ function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SpaceToBatchND") do
+ desc = tf.NodeDescription("SpaceToBatchND")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ block_shape_ = convert(Tensor{Int32}, block_shape_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (paddings_,) = tf.tf_promote(paddings_)
+ end
+ begin
+ (block_shape_,) = tf.tf_promote(block_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, block_shape_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing)
+ desc = tf.EagerOp("SpaceToBatchND")
+ input_ = convert(tf.EagerTensor, input_)
+ block_shape_ = convert(tf.EagerTensor, block_shape_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, block_shape_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tblock_shape"] = tf.data_type(block_shape_)
+ end
+ begin
+ desc["Tpaddings"] = tf.data_type(paddings_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing)
+ if tf.in_eager_mode()
+ space_to_batch_nd_eager(input_, block_shape_, paddings_; name=name)
+ else
+ space_to_batch_nd_graph(input_, block_shape_, paddings_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_reshape(input_indices, input_shape, new_shape)
+
+
+"""
+begin
+ begin
+ function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseReshape") do
+ desc = tf.NodeDescription("SparseReshape")
+ begin
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_shape_ = convert(Tensor{Int64}, input_shape_)
+ begin
+ end
+ end
+ begin
+ new_shape_ = convert(Tensor{Int64}, new_shape_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, new_shape_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing)
+ desc = tf.EagerOp("SparseReshape")
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_shape_ = convert(tf.EagerTensor, input_shape_)
+ new_shape_ = convert(tf.EagerTensor, new_shape_)
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, new_shape_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=name)
+ else
+ sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ optimize_dataset(input_dataset, optimizations)
+
+
+"""
+begin
+ begin
+ function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "OptimizeDataset") do
+ desc = tf.NodeDescription("OptimizeDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ optimizations_ = convert(Tensor{String}, optimizations_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, optimizations_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("OptimizeDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ optimizations_ = convert(tf.EagerTensor, optimizations_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, optimizations_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ optimize_dataset_eager(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ optimize_dataset_graph(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ concat_v2(values, axis)
+
+
+"""
+begin
+ begin
+ function concat_v2_graph(values_, axis_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "ConcatV2") do
+ desc = tf.NodeDescription("ConcatV2")
+ begin
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Int32}, axis_)
+ begin
+ axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function concat_v2_eager(values_, axis_; name=nothing, N=nothing)
+ desc = tf.EagerOp("ConcatV2")
+ values_ = convert(tf.EagerTensor, values_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(concat_v2, [values_, axis_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ concat_v2_eager(values_, axis_; name=name, N=N)
+ else
+ concat_v2_graph(values_, axis_; name=name, N=N)
+ end
+ end
+ end
+end
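+
+# Usage sketch (illustration; assumes eager mode): concatenate a list of
+# tensors along an axis. As with `arg_max` above, the wrapper converts the
+# 1-based `axis` to the kernel's 0-based convention.
+#
+#     a = constant([1, 2])
+#     b = constant([3, 4])
+#     Ops.concat_v2([a, b], 1)  # [1, 2, 3, 4]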
+
+
+"""
+ resource_sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyAdadelta") do
+ desc = tf.NodeDescription("ResourceSparseApplyAdadelta")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ accum_update_ = convert(Tensor{Any}, accum_update_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyAdadelta")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ accum_update_ = convert(tf.EagerTensor, accum_update_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ tile(input, multiples)
+
+
+"""
+begin
+ begin
+ function tile_graph(input_, multiples_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Tile") do
+ desc = tf.NodeDescription("Tile")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ multiples_ = convert(Tensor{Int32}, multiples_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (multiples_,) = tf.tf_promote(multiples_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, multiples_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tile_eager(input_, multiples_; name=nothing)
+ desc = tf.EagerOp("Tile")
+ input_ = convert(tf.EagerTensor, input_)
+ multiples_ = convert(tf.EagerTensor, multiples_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, multiples_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tmultiples"] = tf.data_type(multiples_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tile, [input_, multiples_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile(input_, multiples_; name=nothing)
+ if tf.in_eager_mode()
+ tile_eager(input_, multiples_; name=name)
+ else
+ tile_graph(input_, multiples_; name=name)
+ end
+ end
+ end
+end
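+
+# Usage sketch (illustration; assumes eager mode): repeat a tensor
+# `multiples[i]` times along each dimension i.
+#
+#     x = constant([1, 2, 3])
+#     Ops.tile(x, [2])  # [1, 2, 3, 1, 2, 3]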
+
+
+"""
+ mutex_v2(; container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MutexV2") do
+ desc = tf.NodeDescription("MutexV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutex_v2_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MutexV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutex_v2, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_v2(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ mutex_v2_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ mutex_v2_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ serialize_many_sparse(sparse_indices, sparse_values, sparse_shape; out_type=String)
+
+
+"""
+begin
+ begin
+ function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "SerializeManySparse") do
+ desc = tf.NodeDescription("SerializeManySparse")
+ begin
+ begin
+ sparse_indices_ = convert(Tensor{Int64}, sparse_indices_)
+ begin
+ end
+ end
+ begin
+ sparse_values_ = convert(Tensor{Any}, sparse_values_)
+ begin
+ end
+ end
+ begin
+ sparse_shape_ = convert(Tensor{Int64}, sparse_shape_)
+ begin
+ end
+ end
+ begin
+ (sparse_values_,) = tf.tf_promote(sparse_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("SerializeManySparse")
+ sparse_indices_ = convert(tf.EagerTensor, sparse_indices_)
+ sparse_values_ = convert(tf.EagerTensor, sparse_values_)
+ sparse_shape_ = convert(tf.EagerTensor, sparse_shape_)
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(sparse_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(serialize_many_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type)
+ else
+ serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ tpu_embedding_activations(embedding_variable, sliced_activations)
+
+An op enabling differentiation of TPU Embeddings.
+"""
+begin
+ begin
+ function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing)
+ local desc
+ tf.with_op_name(name, "TPUEmbeddingActivations") do
+ desc = tf.NodeDescription("TPUEmbeddingActivations")
+ begin
+ begin
+ embedding_variable_ = convert(Tensor{Float32}, embedding_variable_)
+ begin
+ end
+ end
+ begin
+ sliced_activations_ = convert(Tensor{Float32}, sliced_activations_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, embedding_variable_)
+ end
+ begin
+ tf.add_input(desc, sliced_activations_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if lookup_id !== nothing
+ desc["lookup_id"] = Base.Int(lookup_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing)
+ desc = tf.EagerOp("TPUEmbeddingActivations")
+ embedding_variable_ = convert(tf.EagerTensor, embedding_variable_)
+ sliced_activations_ = convert(tf.EagerTensor, sliced_activations_)
+ begin
+ begin
+ tf.add_input(desc, embedding_variable_)
+ end
+ begin
+ tf.add_input(desc, sliced_activations_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if lookup_id !== nothing
+ desc["lookup_id"] = Base.Int(lookup_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tpu_embedding_activations, [embedding_variable_, sliced_activations_], name=nothing, table_id=nothing, lookup_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing)
+ if tf.in_eager_mode()
+ tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id)
+ else
+ tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_solve_ls(matrix, rhs, l2_regularizer; fast=true)
+
+
+"""
+begin
+ begin
+ function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixSolveLs") do
+ desc = tf.NodeDescription("BatchMatrixSolveLs")
+ begin
+ begin
+ matrix_ = convert(Tensor{Any}, matrix_)
+ begin
+ end
+ end
+ begin
+ rhs_ = convert(Tensor{Any}, rhs_)
+ begin
+ end
+ end
+ begin
+ l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_)
+ begin
+ end
+ end
+ begin
+ (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ begin
+ tf.add_input(desc, l2_regularizer_)
+ end
+ end
+ begin
+ begin
+ if fast !== nothing
+ desc["fast"] = Base.Bool(fast)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
+ desc = tf.EagerOp("BatchMatrixSolveLs")
+ matrix_ = convert(tf.EagerTensor, matrix_)
+ rhs_ = convert(tf.EagerTensor, rhs_)
+ l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_)
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ begin
+ tf.add_input(desc, l2_regularizer_)
+ end
+ end
+ begin
+ begin
+ if fast !== nothing
+ desc["fast"] = Base.Bool(fast)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(matrix_)
+ end
+ begin
+ desc["T"] = tf.data_type(rhs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast)
+ else
+ batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast)
+ end
+ end
+ end
+end
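+# Usage sketch (editor's note, graph mode; the `Ops` namespace is assumed):
+#
+#     sess = Session(Graph())
+#     A = constant(randn(4, 3, 3))   # a batch of four 3x3 systems
+#     b = constant(randn(4, 3, 1))
+#     x = Ops.batch_matrix_solve_ls(A, b, 0.0)  # regularized least squares per batch
+#     run(sess, x)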
+
+
+"""
+ not_equal(x, y)
+
+
+"""
+begin
+ begin
+ function not_equal_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "NotEqual") do
+ desc = tf.NodeDescription("NotEqual")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function not_equal_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("NotEqual")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(not_equal, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function not_equal(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ not_equal_eager(x_, y_; name=name)
+ else
+ not_equal_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
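+# Usage sketch (editor's note; graph mode, `sess` a `Session` as above). Both
+# inputs are promoted to a common dtype via `tf.tf_promote` before comparison:
+#
+#     x = constant([1, 2, 3]); y = constant([1, 0, 3])
+#     run(sess, Ops.not_equal(x, y))   # Bool[false, true, false]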
+
+
+"""
+ lgamma(x)
+
+
+"""
+begin
+ begin
+ function lgamma_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Lgamma") do
+ desc = tf.NodeDescription("Lgamma")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lgamma_eager(x_; name=nothing)
+ desc = tf.EagerOp("Lgamma")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lgamma, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lgamma(x_; name=nothing)
+ if tf.in_eager_mode()
+ lgamma_eager(x_; name=name)
+ else
+ lgamma_graph(x_; name=name)
+ end
+ end
+ end
+end
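+# Usage sketch (editor's note): elementwise log-gamma; lgamma(4) == log(Γ(4)) == log(6):
+#
+#     run(sess, Ops.lgamma(constant([1.0, 2.0, 4.0])))   # ≈ [0.0, 0.0, 1.7918]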
+
+
+"""
+ tpu_replicate_metadata(; num_cores_per_replica=1, topology="", use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[])
+
+
+"""
+begin
+ begin
+ function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing)
+ local desc
+ tf.with_op_name(name, "TPUReplicateMetadata") do
+ desc = tf.NodeDescription("TPUReplicateMetadata")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if num_replicas !== nothing
+ desc["num_replicas"] = Base.Int(num_replicas)
+ end
+ end
+ begin
+ if num_cores_per_replica !== nothing
+ desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica)
+ end
+ end
+ begin
+ if topology !== nothing
+ desc["topology"] = Base.String(topology)
+ end
+ end
+ begin
+ if use_tpu !== nothing
+ desc["use_tpu"] = Base.Bool(use_tpu)
+ end
+ end
+ begin
+ if device_assignment !== nothing
+ desc["device_assignment"] = map(Base.identity, device_assignment)
+ end
+ end
+ begin
+ if computation_shape !== nothing
+ desc["computation_shape"] = map(Base.identity, computation_shape)
+ end
+ end
+ begin
+ if host_compute_core !== nothing
+ desc["host_compute_core"] = map(Base.identity, host_compute_core)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing)
+ desc = tf.EagerOp("TPUReplicateMetadata")
+ begin
+ end
+ begin
+ begin
+ if num_replicas !== nothing
+ desc["num_replicas"] = Base.Int(num_replicas)
+ end
+ end
+ begin
+ if num_cores_per_replica !== nothing
+ desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica)
+ end
+ end
+ begin
+ if topology !== nothing
+ desc["topology"] = Base.String(topology)
+ end
+ end
+ begin
+ if use_tpu !== nothing
+ desc["use_tpu"] = Base.Bool(use_tpu)
+ end
+ end
+ begin
+ if device_assignment !== nothing
+ desc["device_assignment"] = map(Base.identity, device_assignment)
+ end
+ end
+ begin
+ if computation_shape !== nothing
+ desc["computation_shape"] = map(Base.identity, computation_shape)
+ end
+ end
+ begin
+ if host_compute_core !== nothing
+ desc["host_compute_core"] = map(Base.identity, host_compute_core)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing)
+ if tf.in_eager_mode()
+ tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core)
+ else
+ tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_thread_pool_handle(; max_intra_op_parallelism=1, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalThreadPoolHandle") do
+ desc = tf.NodeDescription("ExperimentalThreadPoolHandle")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if num_threads !== nothing
+ desc["num_threads"] = Base.Int(num_threads)
+ end
+ end
+ begin
+ if max_intra_op_parallelism !== nothing
+ desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism)
+ end
+ end
+ begin
+ if display_name !== nothing
+ desc["display_name"] = Base.String(display_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_thread_pool_handle_eager(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("ExperimentalThreadPoolHandle")
+ begin
+ end
+ begin
+ begin
+ if num_threads !== nothing
+ desc["num_threads"] = Base.Int(num_threads)
+ end
+ end
+ begin
+ if max_intra_op_parallelism !== nothing
+ desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism)
+ end
+ end
+ begin
+ if display_name !== nothing
+ desc["display_name"] = Base.String(display_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_thread_pool_handle, [], name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ experimental_thread_pool_handle_eager(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name)
+ else
+ experimental_thread_pool_handle_graph(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ self_adjoint_eig(input)
+
+
+"""
+begin
+ begin
+ function self_adjoint_eig_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SelfAdjointEig") do
+ desc = tf.NodeDescription("SelfAdjointEig")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function self_adjoint_eig_eager(input_; name=nothing)
+ desc = tf.EagerOp("SelfAdjointEig")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(self_adjoint_eig, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig(input_; name=nothing)
+ if tf.in_eager_mode()
+ self_adjoint_eig_eager(input_; name=name)
+ else
+ self_adjoint_eig_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle)
+
+
+"""
+begin
+ begin
+ function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do
+ desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries")
+ begin
+ begin
+ quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_features
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries")
+ quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_quantile_stream_resource_get_bucket_boundaries, [quantile_stream_resource_handle_], name=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=name, num_features=num_features)
+ else
+ boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=name, num_features=num_features)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, dense)
+
+
+"""
+begin
+ begin
+ function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseDenseCwiseDiv") do
+ desc = tf.NodeDescription("SparseDenseCwiseDiv")
+ begin
+ begin
+ sp_indices_ = convert(Tensor{Int64}, sp_indices_)
+ begin
+ end
+ end
+ begin
+ sp_values_ = convert(Tensor{Any}, sp_values_)
+ begin
+ end
+ end
+ begin
+ sp_shape_ = convert(Tensor{Int64}, sp_shape_)
+ begin
+ end
+ end
+ begin
+ dense_ = convert(Tensor{Any}, dense_)
+ begin
+ end
+ end
+ begin
+ (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ begin
+ tf.add_input(desc, dense_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ desc = tf.EagerOp("SparseDenseCwiseDiv")
+ sp_indices_ = convert(tf.EagerTensor, sp_indices_)
+ sp_values_ = convert(tf.EagerTensor, sp_values_)
+ sp_shape_ = convert(tf.EagerTensor, sp_shape_)
+ dense_ = convert(tf.EagerTensor, dense_)
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ begin
+ tf.add_input(desc, dense_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(sp_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(dense_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_dense_cwise_div, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
+ else
+ sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ acos(x)
+
+
+"""
+begin
+ begin
+ function acos_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Acos") do
+ desc = tf.NodeDescription("Acos")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function acos_eager(x_; name=nothing)
+ desc = tf.EagerOp("Acos")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(acos, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acos(x_; name=nothing)
+ if tf.in_eager_mode()
+ acos_eager(x_; name=name)
+ else
+ acos_graph(x_; name=name)
+ end
+ end
+ end
+end
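+# Usage sketch (editor's note): elementwise arccosine, the same unary pattern as `lgamma`:
+#
+#     run(sess, Ops.acos(constant([1.0, 0.0, -1.0])))   # ≈ [0.0, π/2, π]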
+
+
+"""
+ all(input, reduction_indices; keep_dims=false)
+
+
+"""
+begin
+ begin
+ function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "All") do
+ desc = tf.NodeDescription("All")
+ begin
+ begin
+ input_ = convert(Tensor{Bool}, input_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
+ end
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("All")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(all, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ all_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ all_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
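+# Editor's note: the graph wrapper subtracts 1 from `reduction_indices`, so callers
+# pass Julia's 1-based dimensions while the kernel receives 0-based axes (the eager
+# variant above converts the tensor but does not apply this shift). Sketch:
+#
+#     x = constant([true true; true false])
+#     run(sess, Ops.all(x, 2))   # logical AND along the second (1-based) dimension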
+
+
+"""
+ compare_and_bitpack(input, threshold)
+
+
+"""
+begin
+ begin
+ function compare_and_bitpack_graph(input_, threshold_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CompareAndBitpack") do
+ desc = tf.NodeDescription("CompareAndBitpack")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ threshold_ = convert(Tensor{Any}, threshold_)
+ begin
+ end
+ end
+ begin
+ (input_, threshold_) = tf.tf_promote(input_, threshold_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, threshold_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function compare_and_bitpack_eager(input_, threshold_; name=nothing)
+ desc = tf.EagerOp("CompareAndBitpack")
+ input_ = convert(tf.EagerTensor, input_)
+ threshold_ = convert(tf.EagerTensor, threshold_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, threshold_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(threshold_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(compare_and_bitpack, [input_, threshold_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compare_and_bitpack(input_, threshold_; name=nothing)
+ if tf.in_eager_mode()
+ compare_and_bitpack_eager(input_, threshold_; name=name)
+ else
+ compare_and_bitpack_graph(input_, threshold_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ var_handle_op(; container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "VarHandleOp") do
+ desc = tf.NodeDescription("VarHandleOp")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function var_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing)
+ desc = tf.EagerOp("VarHandleOp")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(var_handle_op, [], name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ var_handle_op_eager(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape)
+ else
+ var_handle_op_graph(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_unique_dataset(input_dataset)
+
+
+"""
+begin
+ begin
+ function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalUniqueDataset") do
+ desc = tf.NodeDescription("ExperimentalUniqueDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalUniqueDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_unique_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_unique_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_unique_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ list_diff(x, y; out_idx=Int32)
+
+
+"""
+begin
+ begin
+ function list_diff_graph(x_, y_; name=nothing, out_idx=nothing)
+ local desc
+ tf.with_op_name(name, "ListDiff") do
+ desc = tf.NodeDescription("ListDiff")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function list_diff_eager(x_, y_; name=nothing, out_idx=nothing)
+ desc = tf.EagerOp("ListDiff")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(list_diff, [x_, y_], name=nothing, out_idx=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function list_diff(x_, y_; name=nothing, out_idx=nothing)
+ if tf.in_eager_mode()
+ list_diff_eager(x_, y_; name=name, out_idx=out_idx)
+ else
+ list_diff_graph(x_, y_; name=name, out_idx=out_idx)
+ end
+ end
+ end
+end
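+# Editor's note: `ListDiff` has two outputs, so the wrapper returns a vector of
+# tensors (graph mode) or the full `res` tuple (eager mode) instead of `res[1]`:
+#
+#     out, idx = Ops.list_diff(constant([1, 2, 3, 4]), constant([2, 4]))
+#     run(sess, out)   # [1, 3] -- the elements of x not present in y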
+
+
+"""
+ create_summary_file_writer(writer, logdir, max_queue, flush_millis, filename_suffix)
+
+
+"""
+begin
+ begin
+ function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CreateSummaryFileWriter") do
+ desc = tf.NodeDescription("CreateSummaryFileWriter")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ logdir_ = convert(Tensor{String}, logdir_)
+ begin
+ end
+ end
+ begin
+ max_queue_ = convert(Tensor{Int32}, max_queue_)
+ begin
+ end
+ end
+ begin
+ flush_millis_ = convert(Tensor{Int32}, flush_millis_)
+ begin
+ end
+ end
+ begin
+ filename_suffix_ = convert(Tensor{String}, filename_suffix_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, logdir_)
+ end
+ begin
+ tf.add_input(desc, max_queue_)
+ end
+ begin
+ tf.add_input(desc, flush_millis_)
+ end
+ begin
+ tf.add_input(desc, filename_suffix_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing)
+ desc = tf.EagerOp("CreateSummaryFileWriter")
+ writer_ = convert(tf.EagerTensor, writer_)
+ logdir_ = convert(tf.EagerTensor, logdir_)
+ max_queue_ = convert(tf.EagerTensor, max_queue_)
+ flush_millis_ = convert(tf.EagerTensor, flush_millis_)
+ filename_suffix_ = convert(tf.EagerTensor, filename_suffix_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, logdir_)
+ end
+ begin
+ tf.add_input(desc, max_queue_)
+ end
+ begin
+ tf.add_input(desc, flush_millis_)
+ end
+ begin
+ tf.add_input(desc, filename_suffix_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(create_summary_file_writer, [writer_, logdir_, max_queue_, flush_millis_, filename_suffix_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing)
+ if tf.in_eager_mode()
+ create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name)
+ else
+ create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ generate_vocab_remapping(new_vocab_file, old_vocab_file; old_vocab_size=-1)
+
+
+"""
+begin
+ begin
+ function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing)
+ local desc
+ tf.with_op_name(name, "GenerateVocabRemapping") do
+ desc = tf.NodeDescription("GenerateVocabRemapping")
+ begin
+ begin
+ new_vocab_file_ = convert(Tensor{String}, new_vocab_file_)
+ begin
+ end
+ end
+ begin
+ old_vocab_file_ = convert(Tensor{String}, old_vocab_file_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, new_vocab_file_)
+ end
+ begin
+ tf.add_input(desc, old_vocab_file_)
+ end
+ end
+ begin
+ begin
+ if new_vocab_offset !== nothing
+ desc["new_vocab_offset"] = Base.Int(new_vocab_offset)
+ end
+ end
+ begin
+ if num_new_vocab !== nothing
+ desc["num_new_vocab"] = Base.Int(num_new_vocab)
+ end
+ end
+ begin
+ if old_vocab_size !== nothing
+ desc["old_vocab_size"] = Base.Int(old_vocab_size)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing)
+ desc = tf.EagerOp("GenerateVocabRemapping")
+ new_vocab_file_ = convert(tf.EagerTensor, new_vocab_file_)
+ old_vocab_file_ = convert(tf.EagerTensor, old_vocab_file_)
+ begin
+ begin
+ tf.add_input(desc, new_vocab_file_)
+ end
+ begin
+ tf.add_input(desc, old_vocab_file_)
+ end
+ end
+ begin
+ begin
+ if new_vocab_offset !== nothing
+ desc["new_vocab_offset"] = Base.Int(new_vocab_offset)
+ end
+ end
+ begin
+ if num_new_vocab !== nothing
+ desc["num_new_vocab"] = Base.Int(num_new_vocab)
+ end
+ end
+ begin
+ if old_vocab_size !== nothing
+ desc["old_vocab_size"] = Base.Int(old_vocab_size)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(generate_vocab_remapping, [new_vocab_file_, old_vocab_file_], name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing)
+ if tf.in_eager_mode()
+ generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size)
+ else
+ generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_inverse(input; adjoint=false)
+
+
+"""
+begin
+ begin
+ function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixInverse") do
+ desc = tf.NodeDescription("BatchMatrixInverse")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing)
+ desc = tf.EagerOp("BatchMatrixInverse")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_inverse, [input_], name=nothing, adjoint=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_inverse(input_; name=nothing, adjoint=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_inverse_eager(input_; name=name, adjoint=adjoint)
+ else
+ batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint)
+ end
+ end
+ end
+end
+
+
+"""
+ control_trigger()
+
+
+"""
+begin
+ begin
+ function control_trigger_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "ControlTrigger") do
+ desc = tf.NodeDescription("ControlTrigger")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function control_trigger_eager(; name=nothing)
+ desc = tf.EagerOp("ControlTrigger")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(control_trigger, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function control_trigger(; name=nothing)
+ if tf.in_eager_mode()
+ control_trigger_eager(; name=name)
+ else
+ control_trigger_graph(; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ stop_gradient(input)
+
+
+"""
+begin
+ begin
+ function stop_gradient_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "StopGradient") do
+ desc = tf.NodeDescription("StopGradient")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stop_gradient_eager(input_; name=nothing)
+ desc = tf.EagerOp("StopGradient")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stop_gradient, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stop_gradient(input_; name=nothing)
+ if tf.in_eager_mode()
+ stop_gradient_eager(input_; name=name)
+ else
+ stop_gradient_graph(input_; name=name)
+ end
+ end
+ end
+end
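+# Usage sketch (editor's note): the forward pass is the identity, but no gradient
+# flows through the marked tensor:
+#
+#     x = constant([1.0, 2.0])
+#     y = Ops.stop_gradient(x) .* x   # gradients treat the first factor as a constant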
+
+
+"""
+ split(split_dim, value)
+
+
+"""
+begin
+ begin
+ function split_graph(split_dim_, value_; name=nothing, num_split=nothing)
+ local desc
+ tf.with_op_name(name, "Split") do
+ desc = tf.NodeDescription("Split")
+ begin
+ begin
+ split_dim_ = convert(Tensor{Int32}, split_dim_)
+ begin
+ split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1)
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, split_dim_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if num_split !== nothing
+ desc["num_split"] = Base.Int(num_split)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_split
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function split_eager(split_dim_, value_; name=nothing, num_split=nothing)
+ desc = tf.EagerOp("Split")
+ split_dim_ = convert(tf.EagerTensor, split_dim_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, split_dim_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if num_split !== nothing
+ desc["num_split"] = Base.Int(num_split)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(split, [split_dim_, value_], name=nothing, num_split=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing)
+ if tf.in_eager_mode()
+ split_eager(split_dim_, value_; name=name, num_split=num_split)
+ else
+ split_graph(split_dim_, value_; name=name, num_split=num_split)
+ end
+ end
+ end
+end
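+# Editor's note: the graph wrapper shifts `split_dim` from Julia's 1-based
+# convention to the kernel's 0-based axis and returns `num_split` tensors:
+#
+#     parts = Ops.split(1, constant(randn(4, 6)), num_split=2)
+#     length(parts)   # 2, each part covering half of the first (1-based) dimension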
+
+
+"""
+ unpack(value; axis=0)
+
+
+"""
+begin
+ begin
+ function unpack_graph(value_; name=nothing, num=nothing, axis=nothing)
+ local desc
+ tf.with_op_name(name, "Unpack") do
+ desc = tf.NodeDescription("Unpack")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if num !== nothing
+ desc["num"] = Base.Int(num)
+ end
+ end
+ begin
+ if axis !== nothing
+ axis = Base.Int(axis) - 1
+ end
+ end
+ begin
+ if axis !== nothing
+ desc["axis"] = Base.Int(axis)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unpack_eager(value_; name=nothing, num=nothing, axis=nothing)
+ desc = tf.EagerOp("Unpack")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if num !== nothing
+ desc["num"] = Base.Int(num)
+ end
+ end
+ begin
+ if axis !== nothing
+ axis = Base.Int(axis) - 1
+ end
+ end
+ begin
+ if axis !== nothing
+ desc["axis"] = Base.Int(axis)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unpack, [value_], name=nothing, num=nothing, axis=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing)
+ if tf.in_eager_mode()
+ unpack_eager(value_; name=name, num=num, axis=axis)
+ else
+ unpack_graph(value_; name=name, num=num, axis=axis)
+ end
+ end
+ end
+end
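+# Usage sketch (editor's note): both the graph and eager paths shift `axis` by one,
+# so `axis=1` unpacks along the first Julia dimension into `num` tensors:
+#
+#     slices = Ops.unpack(constant(randn(3, 4)), num=3, axis=1)   # three length-4 tensors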
+
+
+"""
+ resource_scatter_max(resource, indices, updates)
+
+
+"""
+begin
+ begin
+ function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterMax") do
+ desc = tf.NodeDescription("ResourceScatterMax")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterMax")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_max, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_write(handle, index, value, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayWrite") do
+ desc = tf.NodeDescription("TensorArrayWrite")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayWrite")
+ handle_ = convert(tf.EagerTensor, handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_write, [handle_, index_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name)
+ else
+ tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ fill(dims, value; index_type=Int32)
+
+
+"""
+begin
+ begin
+ function fill_graph(dims_, value_; name=nothing, index_type=nothing)
+ local desc
+ tf.with_op_name(name, "Fill") do
+ desc = tf.NodeDescription("Fill")
+ begin
+ begin
+ dims_ = convert(Tensor{Int32}, dims_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ begin
+ (dims_,) = tf.tf_promote(dims_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, dims_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if index_type !== nothing
+ desc["index_type"] = Base.identity(index_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fill_eager(dims_, value_; name=nothing, index_type=nothing)
+ desc = tf.EagerOp("Fill")
+ dims_ = convert(tf.EagerTensor, dims_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, dims_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if index_type !== nothing
+ desc["index_type"] = Base.identity(index_type)
+ end
+ end
+ end
+ begin
+ desc["index_type"] = tf.data_type(dims_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fill, [dims_, value_], name=nothing, index_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fill(dims_, value_; name=nothing, index_type=nothing)
+ if tf.in_eager_mode()
+ fill_eager(dims_, value_; name=name, index_type=index_type)
+ else
+ fill_graph(dims_, value_; name=name, index_type=index_type)
+ end
+ end
+ end
+end
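+# Usage sketch (editor's note): broadcasts a scalar into the requested shape; in the
+# eager path `index_type` is inferred from the dtype of `dims`:
+#
+#     run(sess, Ops.fill(constant([2, 3]), constant(7.0)))   # a 2x3 array of 7.0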
+
+
+"""
+ softmax(logits)
+
+
+"""
+begin
+ begin
+ function softmax_graph(logits_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Softmax") do
+ desc = tf.NodeDescription("Softmax")
+ begin
+ begin
+ logits_ = convert(Tensor{Any}, logits_)
+ begin
+ end
+ end
+ begin
+ (logits_,) = tf.tf_promote(logits_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function softmax_eager(logits_; name=nothing)
+ desc = tf.EagerOp("Softmax")
+ logits_ = convert(tf.EagerTensor, logits_)
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(logits_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(softmax, [logits_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax(logits_; name=nothing)
+ if tf.in_eager_mode()
+ softmax_eager(logits_; name=name)
+ else
+ softmax_graph(logits_; name=name)
+ end
+ end
+ end
+end
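+# Usage sketch (editor's note): each row sums to one after the transform:
+#
+#     run(sess, Ops.softmax(constant([1.0 2.0 3.0])))   # ≈ [0.0900 0.2447 0.6652]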
+
+
+"""
+ resize_bicubic(images, size; align_corners=false)
+
+
+"""
+begin
+ begin
+ function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeBicubic") do
+ desc = tf.NodeDescription("ResizeBicubic")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeBicubic")
+ images_ = convert(tf.EagerTensor, images_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_bicubic, [images_, size_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners)
+ else
+ resize_bicubic_graph(images_, size_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
+
+
+"""
+ infeed_dequeue_tuple()
+
+A placeholder op for multiple values that will be fed into the computation
+"""
+begin
+ begin
+ function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing)
+ local desc
+ tf.with_op_name(name, "InfeedDequeueTuple") do
+ desc = tf.NodeDescription("InfeedDequeueTuple")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function infeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing)
+ desc = tf.EagerOp("InfeedDequeueTuple")
+ begin
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(infeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing)
+ if tf.in_eager_mode()
+ infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes)
+ else
+ infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ multi_device_iterator()
+
+
+"""
+begin
+ begin
+ function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "MultiDeviceIterator") do
+ desc = tf.NodeDescription("MultiDeviceIterator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if devices !== nothing
+ desc["devices"] = map(Base.identity, devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function multi_device_iterator_eager(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("MultiDeviceIterator")
+ begin
+ end
+ begin
+ begin
+ if devices !== nothing
+ desc["devices"] = map(Base.identity, devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(multi_device_iterator, [], name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ multi_device_iterator_eager(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
+ else
+ multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ decode_csv(records, record_defaults; field_delim=",", use_quote_delim=true, na_value="", select_cols=Int64[])
+
+
+"""
+begin
+ begin
+ function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeCSV") do
+ desc = tf.NodeDescription("DecodeCSV")
+ begin
+ begin
+ records_ = convert(Tensor{String}, records_)
+ begin
+ end
+ end
+ begin
+ record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, records_)
+ end
+ begin
+ tf.add_input(desc, record_defaults_)
+ end
+ end
+ begin
+ begin
+ if OUT_TYPE !== nothing
+ desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE)
+ end
+ end
+ begin
+ if field_delim !== nothing
+ desc["field_delim"] = Base.String(field_delim)
+ end
+ end
+ begin
+ if use_quote_delim !== nothing
+ desc["use_quote_delim"] = Base.Bool(use_quote_delim)
+ end
+ end
+ begin
+ if na_value !== nothing
+ desc["na_value"] = Base.String(na_value)
+ end
+ end
+ begin
+ if select_cols !== nothing
+ desc["select_cols"] = map(Base.identity, select_cols)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing)
+ desc = tf.EagerOp("DecodeCSV")
+ records_ = convert(tf.EagerTensor, records_)
+ record_defaults_ = [convert(tf.EagerTensor, x) for x = record_defaults_] # convert each column default separately, mirroring the graph path
+ begin
+ begin
+ tf.add_input(desc, records_)
+ end
+ begin
+ tf.add_input(desc, record_defaults_)
+ end
+ end
+ begin
+ begin
+ if OUT_TYPE !== nothing
+ desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE)
+ end
+ end
+ begin
+ if field_delim !== nothing
+ desc["field_delim"] = Base.String(field_delim)
+ end
+ end
+ begin
+ if use_quote_delim !== nothing
+ desc["use_quote_delim"] = Base.Bool(use_quote_delim)
+ end
+ end
+ begin
+ if na_value !== nothing
+ desc["na_value"] = Base.String(na_value)
+ end
+ end
+ begin
+ if select_cols !== nothing
+ desc["select_cols"] = map(Base.identity, select_cols)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_csv, [records_, record_defaults_], name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing)
+ if tf.in_eager_mode()
+ decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols)
+ else
+ decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols)
+ end
+ end
+ end
+end
+
+
+"""
+ lookup_table_find(table_handle, keys, default_value)
+
+
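+Illustrative sketch only: `table` is assumed to be the string handle of an
+already-initialized lookup table, and `default` is returned for missing keys.
+
+    default = constant(-1)
+    value = lookup_table_find(table, constant("some key"), default)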
+"""
+begin
+ begin
+ function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableFind") do
+ desc = tf.NodeDescription("LookupTableFind")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ default_value_ = convert(Tensor{Any}, default_value_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ begin
+ (default_value_,) = tf.tf_promote(default_value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing)
+ desc = tf.EagerOp("LookupTableFind")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ default_value_ = convert(tf.EagerTensor, default_value_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(default_value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_find, [table_handle_, keys_, default_value_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find(table_handle_, keys_, default_value_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_find_eager(table_handle_, keys_, default_value_; name=name)
+ else
+ lookup_table_find_graph(table_handle_, keys_, default_value_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ shuffle_and_repeat_dataset(input_dataset, buffer_size, seed, seed2, count)
+
+
+"""
+begin
+ begin
+ function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ShuffleAndRepeatDataset") do
+ desc = tf.NodeDescription("ShuffleAndRepeatDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ seed2_ = convert(Tensor{Int64}, seed2_)
+ begin
+ end
+ end
+ begin
+ count_ = convert(Tensor{Int64}, count_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, seed2_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ShuffleAndRepeatDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ seed2_ = convert(tf.EagerTensor, seed2_)
+ count_ = convert(tf.EagerTensor, count_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, seed2_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(shuffle_and_repeat_dataset, [input_dataset_, buffer_size_, seed_, seed2_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_unbatch_dataset(input_dataset)
+
+
+"""
+begin
+ begin
+ function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalUnbatchDataset") do
+ desc = tf.NodeDescription("ExperimentalUnbatchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalUnbatchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_unbatch_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    avg_pool3d_grad(orig_input_shape, grad; data_format="NDHWC")
+
+
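+A sketch of back-propagating through 3-D average pooling, assuming eager mode
+and the default NDHWC layout; all shapes and values are illustrative:
+
+    orig_shape = constant(Int32[1, 4, 4, 4, 1])
+    grad = constant(randn(Float32, 1, 2, 2, 2, 1))
+    dx = avg_pool3d_grad(orig_shape, grad;
+                         ksize=[1, 2, 2, 2, 1],
+                         strides=[1, 2, 2, 2, 1],
+                         padding="VALID")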
+"""
+begin
+ begin
+ function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "AvgPool3DGrad") do
+ desc = tf.NodeDescription("AvgPool3DGrad")
+ begin
+ begin
+ orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (grad_,) = tf.tf_promote(grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_shape_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("AvgPool3DGrad")
+ orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_shape_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(avg_pool3d_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ avg_pool3d_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ placeholder_with_default(input)
+
+
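+A graph-mode sketch: the resulting tensor acts as a placeholder that falls
+back to `input` when no value is fed. The scalar shape below is illustrative:
+
+    default = constant(3.0)
+    p = placeholder_with_default(default; shape=[])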
+"""
+begin
+ begin
+ function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "PlaceholderWithDefault") do
+ desc = tf.NodeDescription("PlaceholderWithDefault")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing)
+ desc = tf.EagerOp("PlaceholderWithDefault")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(placeholder_with_default, [input_], name=nothing, dtype=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape)
+ else
+ placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ initialize_table_v2(table_handle, keys, values)
+
+
+"""
+begin
+ begin
+ function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InitializeTableV2") do
+ desc = tf.NodeDescription("InitializeTableV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("InitializeTableV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tkey"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tval"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(initialize_table_v2, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_v2(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ initialize_table_v2_eager(table_handle_, keys_, values_; name=name)
+ else
+ initialize_table_v2_graph(table_handle_, keys_, values_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ set_size(set_indices, set_values, set_shape; validate_indices=true)
+
+
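+A sketch assuming eager mode: the sparse triple below encodes the single set
+{2, 4} (indices are zero-based, as in the underlying op), so the result is 2.
+
+    indices = constant([0 0; 0 1])
+    values = constant([2, 4])
+    shape = constant([1, 2])
+    n = set_size(indices, values, shape)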
+"""
+begin
+ begin
+ function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing)
+ local desc
+ tf.with_op_name(name, "SetSize") do
+ desc = tf.NodeDescription("SetSize")
+ begin
+ begin
+ set_indices_ = convert(Tensor{Int64}, set_indices_)
+ begin
+ end
+ end
+ begin
+ set_values_ = convert(Tensor{Any}, set_values_)
+ begin
+ end
+ end
+ begin
+ set_shape_ = convert(Tensor{Int64}, set_shape_)
+ begin
+ end
+ end
+ begin
+ (set_values_,) = tf.tf_promote(set_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, set_indices_)
+ end
+ begin
+ tf.add_input(desc, set_values_)
+ end
+ begin
+ tf.add_input(desc, set_shape_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing)
+ desc = tf.EagerOp("SetSize")
+ set_indices_ = convert(tf.EagerTensor, set_indices_)
+ set_values_ = convert(tf.EagerTensor, set_values_)
+ set_shape_ = convert(tf.EagerTensor, set_shape_)
+ begin
+ begin
+ tf.add_input(desc, set_indices_)
+ end
+ begin
+ tf.add_input(desc, set_values_)
+ end
+ begin
+ tf.add_input(desc, set_shape_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(set_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(set_size, [set_indices_, set_values_, set_shape_], name=nothing, validate_indices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing)
+ if tf.in_eager_mode()
+ set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices)
+ else
+ set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices)
+ end
+ end
+ end
+end
+
+
+"""
+ assert(condition, data; summarize=3)
+
+
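+A graph-mode sketch; the op fails at run time when `condition` is false,
+printing the tensors in `data`. The explicit `T` list is an assumption here
+(it mirrors how the wrapper forwards the attribute):
+
+    cond = constant(false)
+    msg = constant("tensor was not positive")
+    op = assert(cond, [msg]; T=[String])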
+"""
+begin
+ begin
+ function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing)
+ local desc
+ tf.with_op_name(name, "Assert") do
+ desc = tf.NodeDescription("Assert")
+ begin
+ begin
+ condition_ = convert(Tensor{Bool}, condition_)
+ begin
+ end
+ end
+ begin
+ data_ = [convert(Tensor{Any}, x) for x = data_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, condition_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if summarize !== nothing
+ desc["summarize"] = Base.Int(summarize)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing)
+ desc = tf.EagerOp("Assert")
+ condition_ = convert(tf.EagerTensor, condition_)
+            data_ = [convert(tf.EagerTensor, x) for x = data_]
+ begin
+ begin
+ tf.add_input(desc, condition_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if summarize !== nothing
+ desc["summarize"] = Base.Int(summarize)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assert, [condition_, data_], name=nothing, T=nothing, summarize=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing)
+ if tf.in_eager_mode()
+ assert_eager(condition_, data_; name=name, T=T, summarize=summarize)
+ else
+ assert_graph(condition_, data_; name=name, T=T, summarize=summarize)
+ end
+ end
+ end
+end
+
+
+"""
+ non_max_suppression_v4(boxes, scores, max_output_size, iou_threshold, score_threshold; pad_to_max_output_size=false)
+
+
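+A sketch of greedy non-max suppression over six hypothetical boxes, assuming
+eager mode; the op returns the selected indices and the number of valid
+entries:
+
+    boxes = constant(rand(Float32, 6, 4))
+    scores = constant(rand(Float32, 6))
+    selected, valid = non_max_suppression_v4(boxes, scores,
+                                             constant(Int32(5)),
+                                             constant(0.5f0),
+                                             constant(0.0f0))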
+"""
+begin
+ begin
+ function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing)
+ local desc
+ tf.with_op_name(name, "NonMaxSuppressionV4") do
+ desc = tf.NodeDescription("NonMaxSuppressionV4")
+ begin
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ scores_ = convert(Tensor{Float32}, scores_)
+ begin
+ end
+ end
+ begin
+ max_output_size_ = convert(Tensor{Int32}, max_output_size_)
+ begin
+ end
+ end
+ begin
+ iou_threshold_ = convert(Tensor{Float32}, iou_threshold_)
+ begin
+ end
+ end
+ begin
+ score_threshold_ = convert(Tensor{Float32}, score_threshold_)
+ begin
+ end
+ end
+ begin
+ (boxes_, scores_) = tf.tf_promote(boxes_, scores_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, iou_threshold_)
+ end
+ begin
+ tf.add_input(desc, score_threshold_)
+ end
+ end
+ begin
+ begin
+ if pad_to_max_output_size !== nothing
+ desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing)
+ desc = tf.EagerOp("NonMaxSuppressionV4")
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ scores_ = convert(tf.EagerTensor, scores_)
+ max_output_size_ = convert(tf.EagerTensor, max_output_size_)
+ iou_threshold_ = convert(tf.EagerTensor, iou_threshold_)
+ score_threshold_ = convert(tf.EagerTensor, score_threshold_)
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, iou_threshold_)
+ end
+ begin
+ tf.add_input(desc, score_threshold_)
+ end
+ end
+ begin
+ begin
+ if pad_to_max_output_size !== nothing
+ desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(boxes_)
+ end
+ begin
+ desc["T"] = tf.data_type(scores_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(non_max_suppression_v4, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, pad_to_max_output_size=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing)
+ if tf.in_eager_mode()
+ non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size)
+ else
+ non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size)
+ end
+ end
+ end
+end
+
+
+"""
+    sample_distorted_bounding_box_v2(image_size, bounding_boxes, min_object_covered; seed=0, seed2=0, aspect_ratio_range=[0.75, 1.33], area_range=[0.05, 1.0], max_attempts=100, use_image_if_no_bounding_boxes=false)
+
+
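+A sketch assuming eager mode; the empty bounding-box list and thresholds are
+illustrative. The op returns the crop begin, crop size, and the drawn box:
+
+    image_size = constant(Int32[480, 640, 3])
+    boxes = constant(zeros(Float32, 1, 1, 4))
+    crop_begin, crop_size, box = sample_distorted_bounding_box_v2(
+        image_size, boxes, constant(0.1f0);
+        use_image_if_no_bounding_boxes=true)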
+"""
+begin
+ begin
+ function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
+ local desc
+ tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do
+ desc = tf.NodeDescription("SampleDistortedBoundingBoxV2")
+ begin
+ begin
+ image_size_ = convert(Tensor{Any}, image_size_)
+ begin
+ end
+ end
+ begin
+ bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_)
+ begin
+ end
+ end
+ begin
+ min_object_covered_ = convert(Tensor{Float32}, min_object_covered_)
+ begin
+ end
+ end
+ begin
+ (image_size_,) = tf.tf_promote(image_size_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, image_size_)
+ end
+ begin
+ tf.add_input(desc, bounding_boxes_)
+ end
+ begin
+ tf.add_input(desc, min_object_covered_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if aspect_ratio_range !== nothing
+ desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range)
+ end
+ end
+ begin
+ if area_range !== nothing
+ desc["area_range"] = map(Base.identity, area_range)
+ end
+ end
+ begin
+ if max_attempts !== nothing
+ desc["max_attempts"] = Base.Int(max_attempts)
+ end
+ end
+ begin
+ if use_image_if_no_bounding_boxes !== nothing
+ desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
+ desc = tf.EagerOp("SampleDistortedBoundingBoxV2")
+ image_size_ = convert(tf.EagerTensor, image_size_)
+ bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_)
+ min_object_covered_ = convert(tf.EagerTensor, min_object_covered_)
+ begin
+ begin
+ tf.add_input(desc, image_size_)
+ end
+ begin
+ tf.add_input(desc, bounding_boxes_)
+ end
+ begin
+ tf.add_input(desc, min_object_covered_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if aspect_ratio_range !== nothing
+ desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range)
+ end
+ end
+ begin
+ if area_range !== nothing
+ desc["area_range"] = map(Base.identity, area_range)
+ end
+ end
+ begin
+ if max_attempts !== nothing
+ desc["max_attempts"] = Base.Int(max_attempts)
+ end
+ end
+ begin
+ if use_image_if_no_bounding_boxes !== nothing
+ desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(image_size_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sample_distorted_bounding_box_v2, [image_size_, bounding_boxes_, min_object_covered_], name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
+ if tf.in_eager_mode()
+ sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes)
+ else
+ sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes)
+ end
+ end
+ end
+end
+
+
+"""
+    initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter="\\t")
+
+
+"""
+begin
+ begin
+ function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ local desc
+ tf.with_op_name(name, "InitializeTableFromTextFile") do
+ desc = tf.NodeDescription("InitializeTableFromTextFile")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if key_index !== nothing
+ desc["key_index"] = Base.Int(key_index)
+ end
+ end
+ begin
+ if value_index !== nothing
+ desc["value_index"] = Base.Int(value_index)
+ end
+ end
+ begin
+ if vocab_size !== nothing
+ desc["vocab_size"] = Base.Int(vocab_size)
+ end
+ end
+ begin
+ if delimiter !== nothing
+ desc["delimiter"] = Base.String(delimiter)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ desc = tf.EagerOp("InitializeTableFromTextFile")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ filename_ = convert(tf.EagerTensor, filename_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if key_index !== nothing
+ desc["key_index"] = Base.Int(key_index)
+ end
+ end
+ begin
+ if value_index !== nothing
+ desc["value_index"] = Base.Int(value_index)
+ end
+ end
+ begin
+ if vocab_size !== nothing
+ desc["vocab_size"] = Base.Int(vocab_size)
+ end
+ end
+ begin
+ if delimiter !== nothing
+ desc["delimiter"] = Base.String(delimiter)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(initialize_table_from_text_file, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ if tf.in_eager_mode()
+ initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
+ else
+ initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
+ end
+ end
+ end
+end
+
+
+"""
+ lookup_table_size(table_handle)
+
+
+"""
+begin
+ begin
+ function lookup_table_size_graph(table_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableSize") do
+ desc = tf.NodeDescription("LookupTableSize")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_size_eager(table_handle_; name=nothing)
+ desc = tf.EagerOp("LookupTableSize")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size(table_handle_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_size_eager(table_handle_; name=name)
+ else
+ lookup_table_size_graph(table_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyAdagradDA") do
+ desc = tf.NodeDescription("SparseApplyAdagradDA")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_)
+ begin
+ end
+ end
+ begin
+ gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ global_step_ = convert(Tensor{Int64}, global_step_)
+ begin
+ end
+ end
+ begin
+ (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyAdagradDA")
+ var_ = convert(tf.EagerTensor, var_)
+ gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_)
+ gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ global_step_ = convert(tf.EagerTensor, global_step_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(gradient_accumulator_)
+ end
+ begin
+ desc["T"] = tf.data_type(gradient_squared_accumulator_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ broadcast_gradient_args(s0, s1)
+
+
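+Given two broadcast-compatible shape vectors, returns for each side the axes
+that must be summed over when back-propagating through the broadcast. A sketch
+assuming eager mode:
+
+    s0 = constant(Int32[2, 3, 1])
+    s1 = constant(Int32[1, 3, 4])
+    r0, r1 = broadcast_gradient_args(s0, s1)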
+"""
+begin
+ begin
+ function broadcast_gradient_args_graph(s0_, s1_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BroadcastGradientArgs") do
+ desc = tf.NodeDescription("BroadcastGradientArgs")
+ begin
+ begin
+ s0_ = convert(Tensor{Int32}, s0_)
+ begin
+ end
+ end
+ begin
+ s1_ = convert(Tensor{Int32}, s1_)
+ begin
+ end
+ end
+ begin
+ (s0_, s1_) = tf.tf_promote(s0_, s1_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, s0_)
+ end
+ begin
+ tf.add_input(desc, s1_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function broadcast_gradient_args_eager(s0_, s1_; name=nothing)
+ desc = tf.EagerOp("BroadcastGradientArgs")
+ s0_ = convert(tf.EagerTensor, s0_)
+ s1_ = convert(tf.EagerTensor, s1_)
+ begin
+ begin
+ tf.add_input(desc, s0_)
+ end
+ begin
+ tf.add_input(desc, s1_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(s0_)
+ end
+ begin
+ desc["T"] = tf.data_type(s1_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(broadcast_gradient_args, [s0_, s1_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_gradient_args(s0_, s1_; name=nothing)
+ if tf.in_eager_mode()
+ broadcast_gradient_args_eager(s0_, s1_; name=name)
+ else
+ broadcast_gradient_args_graph(s0_, s1_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    summary_writer(; shared_name="", container="")
+
+
+"""
+begin
+ begin
+ function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing)
+ local desc
+ tf.with_op_name(name, "SummaryWriter") do
+ desc = tf.NodeDescription("SummaryWriter")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function summary_writer_eager(; name=nothing, shared_name=nothing, container=nothing)
+ desc = tf.EagerOp("SummaryWriter")
+ begin
+ end
+ begin
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(summary_writer, [], name=nothing, shared_name=nothing, container=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function summary_writer(; name=nothing, shared_name=nothing, container=nothing)
+ if tf.in_eager_mode()
+ summary_writer_eager(; name=name, shared_name=shared_name, container=container)
+ else
+ summary_writer_graph(; name=name, shared_name=shared_name, container=container)
+ end
+ end
+ end
+end
+
+
+"""
+ _while(input)
+
+output = input; While (Cond(output)) { output = Body(output) }
+"""
+begin
+ begin
+ function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
+ local desc
+ tf.with_op_name(name, "_While") do
+ desc = tf.NodeDescription("_While")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if cond !== nothing
+ desc["cond"] = Base.identity(cond)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
+ desc = tf.EagerOp("_While")
+        input_ = [convert(tf.EagerTensor, x) for x = input_]
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if cond !== nothing
+ desc["cond"] = Base.identity(cond)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
+ if tf.in_eager_mode()
+ _while_eager(input_; name=name, T=T, cond=cond, body=body)
+ else
+ _while_graph(input_; name=name, T=T, cond=cond, body=body)
+ end
+ end
+ end
+end
+
+
+"""
+ recv_tpu_embedding_activations()
+
+An op that receives embedding activations on the TPU.
+"""
+begin
+ begin
+ function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing)
+ local desc
+ tf.with_op_name(name, "RecvTPUEmbeddingActivations") do
+ desc = tf.NodeDescription("RecvTPUEmbeddingActivations")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if num_outputs !== nothing
+ desc["num_outputs"] = Base.Int(num_outputs)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_outputs
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing)
+ desc = tf.EagerOp("RecvTPUEmbeddingActivations")
+ begin
+ end
+ begin
+ begin
+ if num_outputs !== nothing
+ desc["num_outputs"] = Base.Int(num_outputs)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing)
+ if tf.in_eager_mode()
+ recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config)
+ else
+ recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config)
+ end
+ end
+ end
+end
+
+
+"""
+ initialize_table(table_handle, keys, values)
+
+
+"""
+begin
+ begin
+ function initialize_table_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InitializeTable") do
+ desc = tf.NodeDescription("InitializeTable")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function initialize_table_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("InitializeTable")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tkey"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tval"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ initialize_table_eager(table_handle_, keys_, values_; name=name)
+ else
+ initialize_table_graph(table_handle_, keys_, values_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    debug_numeric_summary(input; device_name="", tensor_name="", debug_urls=String[], lower_bound=-Inf, upper_bound=Inf, mute_if_healthy=false, gated_grpc=false)
+
+Debug Numeric Summary Op.
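+
+A sketch assuming eager mode: the result summarizes element counts by category
+(negative, zero, positive, NaN, infinite) together with basic statistics.
+
+    x = constant([1.0, -2.0, NaN, Inf])
+    stats = debug_numeric_summary(x)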
+"""
+begin
+ begin
+ function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
+ local desc
+ tf.with_op_name(name, "DebugNumericSummary") do
+ desc = tf.NodeDescription("DebugNumericSummary")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if lower_bound !== nothing
+ desc["lower_bound"] = Base.identity(lower_bound)
+ end
+ end
+ begin
+ if upper_bound !== nothing
+ desc["upper_bound"] = Base.identity(upper_bound)
+ end
+ end
+ begin
+ if mute_if_healthy !== nothing
+ desc["mute_if_healthy"] = Base.Bool(mute_if_healthy)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
+ desc = tf.EagerOp("DebugNumericSummary")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if lower_bound !== nothing
+ desc["lower_bound"] = Base.identity(lower_bound)
+ end
+ end
+ begin
+ if upper_bound !== nothing
+ desc["upper_bound"] = Base.identity(upper_bound)
+ end
+ end
+ begin
+ if mute_if_healthy !== nothing
+ desc["mute_if_healthy"] = Base.Bool(mute_if_healthy)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
+ if tf.in_eager_mode()
+ debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc)
+ else
+ debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc)
+ end
+ end
+ end
+end
+
+
+"""
+    retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ tanh(x)
+
+
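+Element-wise hyperbolic tangent. A one-line sketch assuming eager mode:
+
+    x = constant([-1.0, 0.0, 1.0])
+    y = tanh(x)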
+"""
+begin
+ begin
+ function tanh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Tanh") do
+ desc = tf.NodeDescription("Tanh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tanh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Tanh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tanh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh(x_; name=nothing)
+ if tf.in_eager_mode()
+ tanh_eager(x_; name=name)
+ else
+ tanh_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ symbolic_gradient(input)
+
+
+"""
+begin
+ begin
+ function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ local desc
+ tf.with_op_name(name, "SymbolicGradient") do
+ desc = tf.NodeDescription("SymbolicGradient")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ desc = tf.EagerOp("SymbolicGradient")
+            input_ = [convert(tf.EagerTensor, x) for x = input_]
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ if tf.in_eager_mode()
+ symbolic_gradient_eager(input_; name=name, Tin=Tin, Tout=Tout, f=f)
+ else
+ symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_update_ensemble(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate)
+
+
+"""
+begin
+ begin
+ function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do
+ desc = tf.NodeDescription("BoostedTreesUpdateEnsemble")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ feature_ids_ = convert(Tensor{Int32}, feature_ids_)
+ begin
+ end
+ end
+ begin
+ node_ids_ = [convert(Tensor{Int32}, x) for x = node_ids_]
+ begin
+ end
+ end
+ begin
+ gains_ = [convert(Tensor{Float32}, x) for x = gains_]
+ begin
+ end
+ end
+ begin
+ thresholds_ = [convert(Tensor{Int32}, x) for x = thresholds_]
+ begin
+ end
+ end
+ begin
+ left_node_contribs_ = [convert(Tensor{Float32}, x) for x = left_node_contribs_]
+ begin
+ end
+ end
+ begin
+ right_node_contribs_ = [convert(Tensor{Float32}, x) for x = right_node_contribs_]
+ begin
+ end
+ end
+ begin
+ max_depth_ = convert(Tensor{Int32}, max_depth_)
+ begin
+ end
+ end
+ begin
+ learning_rate_ = convert(Tensor{Float32}, learning_rate_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, feature_ids_)
+ end
+ begin
+ tf.add_input(desc, node_ids_)
+ end
+ begin
+ tf.add_input(desc, gains_)
+ end
+ begin
+ tf.add_input(desc, thresholds_)
+ end
+ begin
+ tf.add_input(desc, left_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, right_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, max_depth_)
+ end
+ begin
+ tf.add_input(desc, learning_rate_)
+ end
+ end
+ begin
+ begin
+ if pruning_mode !== nothing
+ desc["pruning_mode"] = Base.Int(pruning_mode)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesUpdateEnsemble")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ feature_ids_ = convert(tf.EagerTensor, feature_ids_)
+            node_ids_ = [convert(tf.EagerTensor, x) for x = node_ids_]
+            gains_ = [convert(tf.EagerTensor, x) for x = gains_]
+            thresholds_ = [convert(tf.EagerTensor, x) for x = thresholds_]
+            left_node_contribs_ = [convert(tf.EagerTensor, x) for x = left_node_contribs_]
+            right_node_contribs_ = [convert(tf.EagerTensor, x) for x = right_node_contribs_]
+ max_depth_ = convert(tf.EagerTensor, max_depth_)
+ learning_rate_ = convert(tf.EagerTensor, learning_rate_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, feature_ids_)
+ end
+ begin
+ tf.add_input(desc, node_ids_)
+ end
+ begin
+ tf.add_input(desc, gains_)
+ end
+ begin
+ tf.add_input(desc, thresholds_)
+ end
+ begin
+ tf.add_input(desc, left_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, right_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, max_depth_)
+ end
+ begin
+ tf.add_input(desc, learning_rate_)
+ end
+ end
+ begin
+ begin
+ if pruning_mode !== nothing
+ desc["pruning_mode"] = Base.Int(pruning_mode)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features)
+ else
+ boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_momentum(var, accum, lr, grad, momentum; use_locking=false, use_nesterov=false)
+
+
+"""
+begin
+ begin
+ function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyMomentum") do
+ desc = tf.NodeDescription("ApplyMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ApplyMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
+ end
+end
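+# A sketch of the update the ApplyMomentum kernel performs (not part of the
+# generated wrapper; shown with plain scalars for illustration):
+#
+#     accum = momentum * accum + grad
+#     var  -= use_nesterov ? lr * (grad + momentum * accum) : lr * accum
+#
+# e.g. with lr = 0.1, momentum = 0.9, accum = 0.0 and grad = 1.0, the first
+# step yields accum == 1.0 and decreases var by 0.1.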
+
+
+"""
+ reader_read(reader_handle, queue_handle)
+
+
+"""
+begin
+ begin
+ function reader_read_graph(reader_handle_, queue_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderRead") do
+ desc = tf.NodeDescription("ReaderRead")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ begin
+ queue_handle_ = convert(Tensor{String}, queue_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function reader_read_eager(reader_handle_, queue_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderRead")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ queue_handle_ = convert(tf.EagerTensor, queue_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read(reader_handle_, queue_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_read_eager(reader_handle_, queue_handle_; name=name)
+ else
+ reader_read_graph(reader_handle_, queue_handle_; name=name)
+ end
+ end
+ end
+end
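+# ReaderRead has two outputs, so the graph wrapper returns a 2-element vector
+# that destructures into the record's key and value. A usage sketch with
+# hypothetical handles:
+#
+#     key, value = reader_read(reader_handle, queue_handle)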
+
+
+"""
+ _wait_for_distributed_tpu(inputs; startup_timeout_sec=20)
+
+An op that blocks execution until a distributed TPU system has become ready.
+"""
+begin
+ begin
+ function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "_WaitForDistributedTPU") do
+ desc = tf.NodeDescription("_WaitForDistributedTPU")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Int32}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if startup_timeout_sec !== nothing
+ desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing)
+ desc = tf.EagerOp("_WaitForDistributedTPU")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if startup_timeout_sec !== nothing
+ desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing)
+ if tf.in_eager_mode()
+ _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N)
+ else
+ _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ mutex_lock(mutex)
+
+
+"""
+begin
+ begin
+ function mutex_lock_graph(mutex_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MutexLock") do
+ desc = tf.NodeDescription("MutexLock")
+ begin
+ begin
+ mutex_ = convert(Tensor{Any}, mutex_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, mutex_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutex_lock_eager(mutex_; name=nothing)
+ desc = tf.EagerOp("MutexLock")
+ mutex_ = convert(tf.EagerTensor, mutex_)
+ begin
+ begin
+ tf.add_input(desc, mutex_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutex_lock, [mutex_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_lock(mutex_; name=nothing)
+ if tf.in_eager_mode()
+ mutex_lock_eager(mutex_; name=name)
+ else
+ mutex_lock_graph(mutex_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ accumulator_set_global_step(handle, new_global_step)
+
+
+"""
+begin
+ begin
+ function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AccumulatorSetGlobalStep") do
+ desc = tf.NodeDescription("AccumulatorSetGlobalStep")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ new_global_step_ = convert(Tensor{Int64}, new_global_step_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, new_global_step_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing)
+ desc = tf.EagerOp("AccumulatorSetGlobalStep")
+ handle_ = convert(tf.EagerTensor, handle_)
+ new_global_step_ = convert(tf.EagerTensor, new_global_step_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, new_global_step_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_set_global_step(handle_, new_global_step_; name=nothing)
+ if tf.in_eager_mode()
+ accumulator_set_global_step_eager(handle_, new_global_step_; name=name)
+ else
+ accumulator_set_global_step_graph(handle_, new_global_step_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_add(x, y, min_x, max_x, min_y, max_y)
+
+
+"""
+begin
+ begin
+ function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedAdd") do
+ desc = tf.NodeDescription("QuantizedAdd")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ min_x_ = convert(Tensor{Float32}, min_x_)
+ begin
+ end
+ end
+ begin
+ max_x_ = convert(Tensor{Float32}, max_x_)
+ begin
+ end
+ end
+ begin
+ min_y_ = convert(Tensor{Float32}, min_y_)
+ begin
+ end
+ end
+ begin
+ max_y_ = convert(Tensor{Float32}, max_y_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ begin
+ (y_,) = tf.tf_promote(y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, min_x_)
+ end
+ begin
+ tf.add_input(desc, max_x_)
+ end
+ begin
+ tf.add_input(desc, min_y_)
+ end
+ begin
+ tf.add_input(desc, max_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+ desc = tf.EagerOp("QuantizedAdd")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ min_x_ = convert(tf.EagerTensor, min_x_)
+ max_x_ = convert(tf.EagerTensor, max_x_)
+ min_y_ = convert(tf.EagerTensor, min_y_)
+ max_y_ = convert(tf.EagerTensor, max_y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, min_x_)
+ end
+ begin
+ tf.add_input(desc, max_x_)
+ end
+ begin
+ tf.add_input(desc, min_y_)
+ end
+ begin
+ tf.add_input(desc, max_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T1"] = tf.data_type(x_)
+ end
+ begin
+ desc["T2"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+ if tf.in_eager_mode()
+ quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name)
+ else
+ quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name)
+ end
+ end
+ end
+end
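+# QuantizedAdd produces three outputs, so the wrapper returns a vector: the
+# quantized sum plus two Float32 scalars giving the minimum and maximum real
+# values that the output's quantized range represents.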
+
+
+"""
+ squeeze(input; squeeze_dims=Int64[])
+
+
+"""
+begin
+ begin
+ function squeeze_graph(input_; name=nothing, squeeze_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Squeeze") do
+ desc = tf.NodeDescription("Squeeze")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if squeeze_dims !== nothing
+ desc["squeeze_dims"] = map(Base.identity, squeeze_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function squeeze_eager(input_; name=nothing, squeeze_dims=nothing)
+ desc = tf.EagerOp("Squeeze")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if squeeze_dims !== nothing
+ desc["squeeze_dims"] = map(Base.identity, squeeze_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing)
+ if tf.in_eager_mode()
+ squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims)
+ else
+ squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims)
+ end
+ end
+ end
+end
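+# Usage sketch (assuming eager mode is enabled): squeeze drops size-1
+# dimensions, e.g.
+#
+#     x = constant(ones(1, 3, 1))
+#     squeeze(x)                    # all size-1 axes removed => shape (3,)
+#     squeeze(x; squeeze_dims=[0])  # drop only the first axis; the raw op
+#                                   # uses TensorFlow's 0-based axis numbering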
+
+
+"""
+ experimental_matching_files_dataset(patterns)
+
+
+"""
+begin
+ begin
+ function experimental_matching_files_dataset_graph(patterns_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do
+ desc = tf.NodeDescription("ExperimentalMatchingFilesDataset")
+ begin
+ begin
+ patterns_ = convert(Tensor{String}, patterns_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, patterns_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_matching_files_dataset_eager(patterns_; name=nothing)
+ desc = tf.EagerOp("ExperimentalMatchingFilesDataset")
+ patterns_ = convert(tf.EagerTensor, patterns_)
+ begin
+ begin
+ tf.add_input(desc, patterns_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_matching_files_dataset(patterns_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_matching_files_dataset_eager(patterns_; name=name)
+ else
+ experimental_matching_files_dataset_graph(patterns_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_dataset_to_tf_record(input_dataset, filename, compression_type)
+
+
+"""
+begin
+ begin
+ function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do
+ desc = tf.NodeDescription("ExperimentalDatasetToTFRecord")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ begin
+ compression_type_ = convert(Tensor{String}, compression_type_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing)
+ desc = tf.EagerOp("ExperimentalDatasetToTFRecord")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ filename_ = convert(tf.EagerTensor, filename_)
+ compression_type_ = convert(tf.EagerTensor, compression_type_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name)
+ else
+ experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ no_op()
+
+
+"""
+begin
+ begin
+ function no_op_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "NoOp") do
+ desc = tf.NodeDescription("NoOp")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function no_op_eager(; name=nothing)
+ desc = tf.EagerOp("NoOp")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(no_op, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function no_op(; name=nothing)
+ if tf.in_eager_mode()
+ no_op_eager(; name=name)
+ else
+ no_op_graph(; name=name)
+ end
+ end
+ end
+end
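+# NoOp takes no inputs and produces no outputs; it is useful purely as a named
+# anchor, e.g. as a target for control dependencies:
+#
+#     init_done = no_op(name="init_done")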
+
+
+"""
+ zip_dataset(input_datasets)
+
+
+"""
+begin
+ begin
+ function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "ZipDataset") do
+ desc = tf.NodeDescription("ZipDataset")
+ begin
+ begin
+ input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_datasets_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing)
+ desc = tf.EagerOp("ZipDataset")
+ input_datasets_ = convert(tf.EagerTensor, input_datasets_)
+ begin
+ begin
+ tf.add_input(desc, input_datasets_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing)
+ if tf.in_eager_mode()
+ zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N)
+ else
+ zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_stochastic_gradient_descent_parameters(parameters; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ identity_reader_v2(; container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "IdentityReaderV2") do
+ desc = tf.NodeDescription("IdentityReaderV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function identity_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("IdentityReaderV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ lmdb_reader(; container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "LMDBReader") do
+ desc = tf.NodeDescription("LMDBReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lmdb_reader_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("LMDBReader")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ lmdb_reader_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ lmdb_reader_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ nccl_all_reduce(input)
+
+
+"""
+begin
+ begin
+ function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "NcclAllReduce") do
+ desc = tf.NodeDescription("NcclAllReduce")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ desc = tf.EagerOp("NcclAllReduce")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
+ else
+ nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ text_line_dataset(filenames, compression_type, buffer_size)
+
+
+"""
+begin
+ begin
+ function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TextLineDataset") do
+ desc = tf.NodeDescription("TextLineDataset")
+ begin
+ begin
+ filenames_ = convert(Tensor{String}, filenames_)
+ begin
+ end
+ end
+ begin
+ compression_type_ = convert(Tensor{String}, compression_type_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing)
+ desc = tf.EagerOp("TextLineDataset")
+ filenames_ = convert(tf.EagerTensor, filenames_)
+ compression_type_ = convert(tf.EagerTensor, compression_type_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing)
+ if tf.in_eager_mode()
+ text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=name)
+ else
+ text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=name)
+ end
+ end
+ end
+end
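+# Usage sketch (hypothetical file name): builds a dataset over the lines of a
+# text file; an empty compression_type means "no compression". The wrapper
+# converts plain Julia values to tensors itself:
+#
+#     ds = text_line_dataset(["lines.txt"], "", 10_000)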
+
+
+"""
+ sdca_shrink_l1(weights)
+
+
+"""
+begin
+ begin
+ function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing)
+ local desc
+ tf.with_op_name(name, "SdcaShrinkL1") do
+ desc = tf.NodeDescription("SdcaShrinkL1")
+ begin
+ begin
+ weights_ = [convert(Tensor{Float32}, x) for x = weights_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, weights_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ begin
+ if l1 !== nothing
+ desc["l1"] = Base.identity(l1)
+ end
+ end
+ begin
+ if l2 !== nothing
+ desc["l2"] = Base.identity(l2)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing)
+ desc = tf.EagerOp("SdcaShrinkL1")
+ weights_ = convert(tf.EagerTensor, weights_)
+ begin
+ begin
+ tf.add_input(desc, weights_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ begin
+ if l1 !== nothing
+ desc["l1"] = Base.identity(l1)
+ end
+ end
+ begin
+ if l2 !== nothing
+ desc["l2"] = Base.identity(l2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing)
+ if tf.in_eager_mode()
+ sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2)
+ else
+ sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2)
+ end
+ end
+ end
+end
+
+
+"""
+ tf_record_reader_v2(; container="", shared_name="", compression_type="")
+
+
+"""
+begin
+ begin
+ function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+ local desc
+ tf.with_op_name(name, "TFRecordReaderV2") do
+ desc = tf.NodeDescription("TFRecordReaderV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tf_record_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+ desc = tf.EagerOp("TFRecordReaderV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+ if tf.in_eager_mode()
+ tf_record_reader_v2_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type)
+ else
+ tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type)
+ end
+ end
+ end
+end
+
+
+"""
+ padded_batch_dataset_v2(input_dataset, batch_size, padded_shapes, padding_values, drop_remainder)
+
+
+"""
+begin
+ begin
+ function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "PaddedBatchDatasetV2") do
+ desc = tf.NodeDescription("PaddedBatchDatasetV2")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ begin
+ padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_]
+ begin
+ end
+ end
+ begin
+ padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_]
+ begin
+ end
+ end
+ begin
+ drop_remainder_ = convert(Tensor{Bool}, drop_remainder_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, padded_shapes_)
+ end
+ begin
+ tf.add_input(desc, padding_values_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+ desc = tf.EagerOp("PaddedBatchDatasetV2")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ padded_shapes_ = convert(tf.EagerTensor, padded_shapes_)
+ padding_values_ = convert(tf.EagerTensor, padding_values_)
+ drop_remainder_ = convert(tf.EagerTensor, drop_remainder_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, padded_shapes_)
+ end
+ begin
+ tf.add_input(desc, padding_values_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+ if tf.in_eager_mode()
+ padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N)
+ else
+ padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ multi_device_iterator_from_string_handle(string_handle; output_types=Int64[], output_shapes=Int64[])
+
+
+"""
+begin
+ begin
+ function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do
+ desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle")
+ begin
+ begin
+ string_handle_ = convert(Tensor{String}, string_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, string_handle_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle")
+ string_handle_ = convert(tf.EagerTensor, string_handle_)
+ begin
+ begin
+ tf.add_input(desc, string_handle_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_proximal_adagrad_parameters(parameters, accumulators; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_size(handle, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_size_graph(handle_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArraySize") do
+ desc = tf.NodeDescription("TensorArraySize")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_size_eager(handle_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArraySize")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size(handle_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_size_eager(handle_, flow_in_; name=name)
+ else
+ tensor_array_size_graph(handle_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ ordered_map_size(; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapSize") do
+ desc = tf.NodeDescription("OrderedMapSize")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ordered_map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapSize")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ stateless_random_uniform(shape, seed; dtype=Float32)
+
+
+"""
+begin
+ begin
+ function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessRandomUniform") do
+ desc = tf.NodeDescription("StatelessRandomUniform")
+ begin
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (seed_,) = tf.tf_promote(seed_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("StatelessRandomUniform")
+ shape_ = convert(tf.EagerTensor, shape_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ begin
+ desc["Tseed"] = tf.data_type(seed_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype)
+ else
+ stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
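+# Unlike RandomUniform, the stateless variant is a pure function of
+# (shape, seed): calling it twice with the same arguments yields identical
+# draws. A sketch:
+#
+#     a = stateless_random_uniform([2, 2], Int64[1, 2])
+#     b = stateless_random_uniform([2, 2], Int64[1, 2])
+#     # a and b contain the same values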
+
+
+"""
+ sparse_to_sparse_set_operation(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape; validate_indices=true)
+
+
+"""
+begin
+ begin
+ function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ local desc
+ tf.with_op_name(name, "SparseToSparseSetOperation") do
+ desc = tf.NodeDescription("SparseToSparseSetOperation")
+ begin
+ begin
+ set1_indices_ = convert(Tensor{Int64}, set1_indices_)
+ begin
+ end
+ end
+ begin
+ set1_values_ = convert(Tensor{Any}, set1_values_)
+ begin
+ end
+ end
+ begin
+ set1_shape_ = convert(Tensor{Int64}, set1_shape_)
+ begin
+ end
+ end
+ begin
+ set2_indices_ = convert(Tensor{Int64}, set2_indices_)
+ begin
+ end
+ end
+ begin
+ set2_values_ = convert(Tensor{Any}, set2_values_)
+ begin
+ end
+ end
+ begin
+ set2_shape_ = convert(Tensor{Int64}, set2_shape_)
+ begin
+ end
+ end
+ begin
+ (set1_values_, set2_values_) = tf.tf_promote(set1_values_, set2_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, set1_indices_)
+ end
+ begin
+ tf.add_input(desc, set1_values_)
+ end
+ begin
+ tf.add_input(desc, set1_shape_)
+ end
+ begin
+ tf.add_input(desc, set2_indices_)
+ end
+ begin
+ tf.add_input(desc, set2_values_)
+ end
+ begin
+ tf.add_input(desc, set2_shape_)
+ end
+ end
+ begin
+ begin
+ if set_operation !== nothing
+ desc["set_operation"] = Base.String(set_operation)
+ end
+ end
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ desc = tf.EagerOp("SparseToSparseSetOperation")
+ set1_indices_ = convert(tf.EagerTensor, set1_indices_)
+ set1_values_ = convert(tf.EagerTensor, set1_values_)
+ set1_shape_ = convert(tf.EagerTensor, set1_shape_)
+ set2_indices_ = convert(tf.EagerTensor, set2_indices_)
+ set2_values_ = convert(tf.EagerTensor, set2_values_)
+ set2_shape_ = convert(tf.EagerTensor, set2_shape_)
+ begin
+ begin
+ tf.add_input(desc, set1_indices_)
+ end
+ begin
+ tf.add_input(desc, set1_values_)
+ end
+ begin
+ tf.add_input(desc, set1_shape_)
+ end
+ begin
+ tf.add_input(desc, set2_indices_)
+ end
+ begin
+ tf.add_input(desc, set2_values_)
+ end
+ begin
+ tf.add_input(desc, set2_shape_)
+ end
+ end
+ begin
+ begin
+ if set_operation !== nothing
+ desc["set_operation"] = Base.String(set_operation)
+ end
+ end
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(set1_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(set2_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ if tf.in_eager_mode()
+ sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices)
+ else
+ sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_summary(tensor; description="", labels=Int64[], display_name="")
+
+
+"""
+begin
+ begin
+ function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorSummary") do
+ desc = tf.NodeDescription("TensorSummary")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if description !== nothing
+ desc["description"] = Base.String(description)
+ end
+ end
+ begin
+ if labels !== nothing
+ desc["labels"] = map(Base.identity, labels)
+ end
+ end
+ begin
+ if display_name !== nothing
+ desc["display_name"] = Base.String(display_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing)
+ desc = tf.EagerOp("TensorSummary")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if description !== nothing
+ desc["description"] = Base.String(description)
+ end
+ end
+ begin
+ if labels !== nothing
+ desc["labels"] = map(Base.identity, labels)
+ end
+ end
+ begin
+ if display_name !== nothing
+ desc["display_name"] = Base.String(display_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing)
+ if tf.in_eager_mode()
+ tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name)
+ else
+ tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name)
+ end
+ end
+ end
+end
+
+
+"""
+ remote_fused_graph_execute(inputs)
+
+
+"""
+begin
+ begin
+ function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing)
+ local desc
+ tf.with_op_name(name, "RemoteFusedGraphExecute") do
+ desc = tf.NodeDescription("RemoteFusedGraphExecute")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if Tinputs !== nothing
+ desc["Tinputs"] = map(Base.identity, Tinputs)
+ end
+ end
+ begin
+ if Toutputs !== nothing
+ desc["Toutputs"] = map(Base.identity, Toutputs)
+ end
+ end
+ begin
+ if serialized_remote_fused_graph_execute_info !== nothing
+ desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing)
+ desc = tf.EagerOp("RemoteFusedGraphExecute")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if Tinputs !== nothing
+ desc["Tinputs"] = map(Base.identity, Tinputs)
+ end
+ end
+ begin
+ if Toutputs !== nothing
+ desc["Toutputs"] = map(Base.identity, Toutputs)
+ end
+ end
+ begin
+ if serialized_remote_fused_graph_execute_info !== nothing
+ desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing)
+ if tf.in_eager_mode()
+ remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info)
+ else
+ remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_slice_grad(backprop_val_grad, input_indices, input_start, output_indices)
+
+
+"""
+begin
+ begin
+ function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSliceGrad") do
+ desc = tf.NodeDescription("SparseSliceGrad")
+ begin
+ begin
+ backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_)
+ begin
+ end
+ end
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_start_ = convert(Tensor{Int64}, input_start_)
+ begin
+ end
+ end
+ begin
+ output_indices_ = convert(Tensor{Int64}, output_indices_)
+ begin
+ end
+ end
+ begin
+ (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, backprop_val_grad_)
+ end
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_start_)
+ end
+ begin
+ tf.add_input(desc, output_indices_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing)
+ desc = tf.EagerOp("SparseSliceGrad")
+ backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_)
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_start_ = convert(tf.EagerTensor, input_start_)
+ output_indices_ = convert(tf.EagerTensor, output_indices_)
+ begin
+ begin
+ tf.add_input(desc, backprop_val_grad_)
+ end
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_start_)
+ end
+ begin
+ tf.add_input(desc, output_indices_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(backprop_val_grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, output_indices_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name)
+ else
+ sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ cumsum(x, axis; exclusive=false, reverse=false)
+
+
+"""
+begin
+ begin
+ function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing)
+ local desc
+ tf.with_op_name(name, "Cumsum") do
+ desc = tf.NodeDescription("Cumsum")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Int32}, axis_)
+ begin
+ axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if exclusive !== nothing
+ desc["exclusive"] = Base.Bool(exclusive)
+ end
+ end
+ begin
+ if reverse !== nothing
+ desc["reverse"] = Base.Bool(reverse)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing)
+ desc = tf.EagerOp("Cumsum")
+ x_ = convert(tf.EagerTensor, x_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if exclusive !== nothing
+ desc["exclusive"] = Base.Bool(exclusive)
+ end
+ end
+ begin
+ if reverse !== nothing
+ desc["reverse"] = Base.Bool(reverse)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing)
+ if tf.in_eager_mode()
+ cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse)
+ else
+ cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse)
+ end
+ end
+ end
+end
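+
+# Usage sketch (not part of the generated code; assumes eager mode). The
+# wrapper shifts `axis` down by one, so it is 1-based on the Julia side:
+#
+#     x = constant([1, 2, 3, 4])
+#     cumsum(x, 1)                   # => [1, 3, 6, 10]
+#     cumsum(x, 1, exclusive=true)   # => [0, 1, 3, 6]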
+
+
+"""
+ batch_norm_with_global_normalization_grad(t, m, v, gamma, backprop)
+
+
+"""
+begin
+ begin
+ function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ local desc
+ tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do
+ desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad")
+ begin
+ begin
+ t_ = convert(Tensor{Any}, t_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ gamma_ = convert(Tensor{Any}, gamma_)
+ begin
+ end
+ end
+ begin
+ backprop_ = convert(Tensor{Any}, backprop_)
+ begin
+ end
+ end
+ begin
+ (t_, m_, v_, gamma_, backprop_) = tf.tf_promote(t_, m_, v_, gamma_, backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, gamma_)
+ end
+ begin
+ tf.add_input(desc, backprop_)
+ end
+ end
+ begin
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if scale_after_normalization !== nothing
+ desc["scale_after_normalization"] = Base.Bool(scale_after_normalization)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad")
+ t_ = convert(tf.EagerTensor, t_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ gamma_ = convert(tf.EagerTensor, gamma_)
+ backprop_ = convert(tf.EagerTensor, backprop_)
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, gamma_)
+ end
+ begin
+ tf.add_input(desc, backprop_)
+ end
+ end
+ begin
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if scale_after_normalization !== nothing
+ desc["scale_after_normalization"] = Base.Bool(scale_after_normalization)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(t_)
+ end
+ begin
+ desc["T"] = tf.data_type(m_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ begin
+ desc["T"] = tf.data_type(gamma_)
+ end
+ begin
+ desc["T"] = tf.data_type(backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ if tf.in_eager_mode()
+ batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
+ else
+ batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
+ end
+ end
+ end
+end
+
+
+"""
+ avg_pool_grad(orig_input_shape, grad; data_format=)
+
+
+"""
+begin
+ begin
+ function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "AvgPoolGrad") do
+ desc = tf.NodeDescription("AvgPoolGrad")
+ begin
+ begin
+ orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (grad_,) = tf.tf_promote(grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_shape_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("AvgPoolGrad")
+ orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_shape_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
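+
+# Usage sketch (not part of the generated code; assumes eager mode).
+# `orig_input_shape` is the forward pass's input shape as an Int32 vector,
+# and `grad` is the gradient flowing back from the pooled output:
+#
+#     orig_shape = constant(Int32[1, 4, 4, 1])                 # NHWC
+#     g = constant(randn(Float32, 1, 2, 2, 1))
+#     avg_pool_grad(orig_shape, g, ksize=[1, 2, 2, 1],
+#                   strides=[1, 2, 2, 1], padding="VALID")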
+
+
+"""
+ restore_v2(prefix, tensor_names, shape_and_slices)
+
+
+"""
+begin
+ begin
+ function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing)
+ local desc
+ tf.with_op_name(name, "RestoreV2") do
+ desc = tf.NodeDescription("RestoreV2")
+ begin
+ begin
+ prefix_ = convert(Tensor{String}, prefix_)
+ begin
+ end
+ end
+ begin
+ tensor_names_ = convert(Tensor{String}, tensor_names_)
+ begin
+ end
+ end
+ begin
+ shape_and_slices_ = convert(Tensor{String}, shape_and_slices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, prefix_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, shape_and_slices_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing)
+ desc = tf.EagerOp("RestoreV2")
+ prefix_ = convert(tf.EagerTensor, prefix_)
+ tensor_names_ = convert(tf.EagerTensor, tensor_names_)
+ shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_)
+ begin
+ begin
+ tf.add_input(desc, prefix_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, shape_and_slices_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing)
+ if tf.in_eager_mode()
+ restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes)
+ else
+ restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes)
+ end
+ end
+ end
+end
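+
+# Usage sketch (not part of the generated code; assumes eager mode and a
+# hypothetical checkpoint prefix "model.ckpt"). An empty string in
+# `shape_and_slices` restores the corresponding tensor in full:
+#
+#     restore_v2(constant("model.ckpt"), constant(["w"]), constant([""]),
+#                dtypes=[Float32])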
+
+
+"""
+ relu6(features)
+
+
+"""
+begin
+ begin
+ function relu6_graph(features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Relu6") do
+ desc = tf.NodeDescription("Relu6")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function relu6_eager(features_; name=nothing)
+ desc = tf.EagerOp("Relu6")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(relu6, [features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6(features_; name=nothing)
+ if tf.in_eager_mode()
+ relu6_eager(features_; name=name)
+ else
+ relu6_graph(features_; name=name)
+ end
+ end
+ end
+end
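+
+# Usage sketch (not part of the generated code; assumes eager mode).
+# Relu6 clamps each element to the interval [0, 6]:
+#
+#     relu6(constant([-2.0, 3.0, 9.0]))   # => [0.0, 3.0, 6.0]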
+
+
+"""
+ sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyRMSProp") do
+ desc = tf.NodeDescription("SparseApplyRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(ms_)
+ end
+ begin
+ desc["T"] = tf.data_type(mom_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
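+
+# Semantics sketch (not part of the generated code). Per the TF
+# SparseApplyRMSProp documentation, for each row selected by `indices`
+# (1-based here; the wrapper shifts them down by one for the 0-based op):
+#
+#     ms  <- rho * ms + (1 - rho) * grad.^2
+#     mom <- momentum * mom + lr * grad ./ sqrt.(ms .+ epsilon)
+#     var <- var - mom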
+
+
+"""
+ _recv(; client_terminated=false)
+
+Receives the named tensor from send_device on recv_device.
+"""
+begin
+ begin
+ function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ local desc
+ tf.with_op_name(name, "_Recv") do
+ desc = tf.NodeDescription("_Recv")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if tensor_type !== nothing
+ desc["tensor_type"] = Base.identity(tensor_type)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ desc = tf.EagerOp("_Recv")
+ begin
+ end
+ begin
+ begin
+ if tensor_type !== nothing
+ desc["tensor_type"] = Base.identity(tensor_type)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ if tf.in_eager_mode()
+ _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ else
+ _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ end
+ end
+ end
+end
+
+
+"""
+ max_pool(input; data_format=)
+
+
+"""
+begin
+ begin
+ function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPool") do
+ desc = tf.NodeDescription("MaxPool")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPool")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
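+
+# Usage sketch (not part of the generated code; assumes eager mode and the
+# default NHWC layout, so `ksize`/`strides` run [batch, height, width, channels]):
+#
+#     x = constant(randn(Float32, 1, 4, 4, 1))
+#     max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")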
+
+
+"""
+ invert(x)
+
+
+"""
+begin
+ begin
+ function invert_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Invert") do
+ desc = tf.NodeDescription("Invert")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function invert_eager(x_; name=nothing)
+ desc = tf.EagerOp("Invert")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(invert, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert(x_; name=nothing)
+ if tf.in_eager_mode()
+ invert_eager(x_; name=name)
+ else
+ invert_graph(x_; name=name)
+ end
+ end
+ end
+end
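+
+# Usage sketch (not part of the generated code; assumes eager mode).
+# Invert is elementwise bitwise NOT on integer tensors:
+#
+#     invert(constant(Int32[0, 1, 2]))   # => Int32[-1, -2, -3]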
+
+
+"""
+ _unary_ops_composition(x)
+
+*NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is
+expected to create these operators.
+"""
+begin
+ begin
+ function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing)
+ local desc
+ tf.with_op_name(name, "_UnaryOpsComposition") do
+ desc = tf.NodeDescription("_UnaryOpsComposition")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if op_names !== nothing
+ desc["op_names"] = map(Base.identity, op_names)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing)
+ desc = tf.EagerOp("_UnaryOpsComposition")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if op_names !== nothing
+ desc["op_names"] = map(Base.identity, op_names)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _unary_ops_composition(x_; name=nothing, op_names=nothing)
+ if tf.in_eager_mode()
+ _unary_ops_composition_eager(x_; name=name, op_names=op_names)
+ else
+ _unary_ops_composition_graph(x_; name=name, op_names=op_names)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_map_dataset(input_dataset, other_arguments; use_inter_op_parallelism=true, preserve_cardinality=false)
+
+
+"""
+begin
+ begin
+ function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalMapDataset") do
+ desc = tf.NodeDescription("ExperimentalMapDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+ desc = tf.EagerOp("ExperimentalMapDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+            other_arguments_ = [convert(tf.EagerTensor, x) for x = other_arguments_]
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+ if tf.in_eager_mode()
+ experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality)
+ else
+ experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_adam_parameters(parameters, momenta, velocities; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ momenta_ = convert(Tensor{Float32}, momenta_)
+ begin
+ end
+ end
+ begin
+ velocities_ = convert(Tensor{Float32}, velocities_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ begin
+ tf.add_input(desc, velocities_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ momenta_ = convert(tf.EagerTensor, momenta_)
+ velocities_ = convert(tf.EagerTensor, velocities_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ begin
+ tf.add_input(desc, velocities_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ parse_tensor(serialized)
+
+
+"""
+begin
+ begin
+ function parse_tensor_graph(serialized_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "ParseTensor") do
+ desc = tf.NodeDescription("ParseTensor")
+ begin
+ begin
+ serialized_ = convert(Tensor{String}, serialized_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function parse_tensor_eager(serialized_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("ParseTensor")
+ serialized_ = convert(tf.EagerTensor, serialized_)
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_tensor(serialized_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ parse_tensor_eager(serialized_; name=name, out_type=out_type)
+ else
+ parse_tensor_graph(serialized_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
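+
+# Usage sketch (not part of the generated code; assumes eager mode and the
+# companion `serialize_tensor` wrapper for the SerializeTensor op).
+# `out_type` must match the dtype of the serialized tensor:
+#
+#     s = serialize_tensor(constant([1.0, 2.0]))
+#     parse_tensor(s, out_type=Float64)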
+
+
+"""
+ experimental_materialized_index_dataset_handle()
+
+
+"""
+begin
+ begin
+ function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do
+ desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_materialized_index_dataset_handle_eager(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ multi_device_iterator_get_next_from_shard(multi_device_iterator, shard_num, incarnation_id)
+
+
+"""
+begin
+ begin
+ function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do
+ desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard")
+ begin
+ begin
+ multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_)
+ begin
+ end
+ end
+ begin
+ shard_num_ = convert(Tensor{Int32}, shard_num_)
+ begin
+ end
+ end
+ begin
+ incarnation_id_ = convert(Tensor{Int64}, incarnation_id_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, multi_device_iterator_)
+ end
+ begin
+ tf.add_input(desc, shard_num_)
+ end
+ begin
+ tf.add_input(desc, incarnation_id_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard")
+ multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_)
+ shard_num_ = convert(tf.EagerTensor, shard_num_)
+ incarnation_id_ = convert(tf.EagerTensor, incarnation_id_)
+ begin
+ begin
+ tf.add_input(desc, multi_device_iterator_)
+ end
+ begin
+ tf.add_input(desc, shard_num_)
+ end
+ begin
+ tf.add_input(desc, incarnation_id_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ random_uniform_int(shape, minval, maxval; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "RandomUniformInt") do
+ desc = tf.NodeDescription("RandomUniformInt")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ minval_ = convert(Tensor{Any}, minval_)
+ begin
+ end
+ end
+ begin
+ maxval_ = convert(Tensor{Any}, maxval_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (minval_, maxval_) = tf.tf_promote(minval_, maxval_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, minval_)
+ end
+ begin
+ tf.add_input(desc, maxval_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("RandomUniformInt")
+ shape_ = convert(tf.EagerTensor, shape_)
+ minval_ = convert(tf.EagerTensor, minval_)
+ maxval_ = convert(tf.EagerTensor, maxval_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, minval_)
+ end
+ begin
+ tf.add_input(desc, maxval_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(minval_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(maxval_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2)
+ else
+ random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
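+
+# Usage sketch (not part of the generated code; assumes eager mode).
+# Draws integers uniformly from the half-open interval [minval, maxval):
+#
+#     random_uniform_int(constant([3]), constant(0), constant(10), seed=42)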
+
+
+"""
+ sparse_softmax_cross_entropy_with_logits(features, labels)
+
+
+"""
+begin
+ begin
+ function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do
+ desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ labels_ = convert(Tensor{Int64}, labels_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ begin
+ (labels_,) = tf.tf_promote(labels_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, labels_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing)
+ desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits")
+ features_ = convert(tf.EagerTensor, features_)
+ labels_ = convert(tf.EagerTensor, labels_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, labels_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ begin
+ desc["Tlabels"] = tf.data_type(labels_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name)
+ else
+ sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name)
+ end
+ end
+ end
+end
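+
+# Usage sketch (not part of the generated code; assumes eager mode). The op
+# returns both the per-example loss and the backprop gradient. Note the
+# wrapper applies no index shift here, so labels are 0-based class ids as in
+# the raw TF op:
+#
+#     logits = constant(randn(Float32, 4, 3))      # 4 examples, 3 classes
+#     labels = constant(Int64[0, 2, 1, 0])
+#     loss, backprop = sparse_softmax_cross_entropy_with_logits(logits, labels)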
+
+
+"""
+ tensor_array_read_v2(handle, index, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayReadV2") do
+ desc = tf.NodeDescription("TensorArrayReadV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("TensorArrayReadV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype)
+ else
+ tensor_array_read_v2_graph(handle_, index_, flow_in_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_read_up_to(reader_handle, queue_handle, num_records)
+
+
+"""
+begin
+ begin
+ function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderReadUpTo") do
+ desc = tf.NodeDescription("ReaderReadUpTo")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ begin
+ queue_handle_ = convert(Tensor{String}, queue_handle_)
+ begin
+ end
+ end
+ begin
+ num_records_ = convert(Tensor{Int64}, num_records_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ begin
+ tf.add_input(desc, num_records_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing)
+ desc = tf.EagerOp("ReaderReadUpTo")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ queue_handle_ = convert(tf.EagerTensor, queue_handle_)
+ num_records_ = convert(tf.EagerTensor, num_records_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ begin
+ tf.add_input(desc, num_records_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing)
+ if tf.in_eager_mode()
+ reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name)
+ else
+ reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ encode_proto(sizes, values; descriptor_source=)
+
+
+"""
+begin
+ begin
+ function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+ local desc
+ tf.with_op_name(name, "EncodeProto") do
+ desc = tf.NodeDescription("EncodeProto")
+ begin
+ begin
+ sizes_ = convert(Tensor{Int32}, sizes_)
+ begin
+ end
+ end
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sizes_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if field_names !== nothing
+ desc["field_names"] = map(Base.identity, field_names)
+ end
+ end
+ begin
+ if message_type !== nothing
+ desc["message_type"] = Base.String(message_type)
+ end
+ end
+ begin
+ if descriptor_source !== nothing
+ desc["descriptor_source"] = Base.String(descriptor_source)
+ end
+ end
+ begin
+ if Tinput_types !== nothing
+ desc["Tinput_types"] = map(Base.identity, Tinput_types)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+ desc = tf.EagerOp("EncodeProto")
+ sizes_ = convert(tf.EagerTensor, sizes_)
+        values_ = [convert(tf.EagerTensor, x) for x = values_]
+ begin
+ begin
+ tf.add_input(desc, sizes_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if field_names !== nothing
+ desc["field_names"] = map(Base.identity, field_names)
+ end
+ end
+ begin
+ if message_type !== nothing
+ desc["message_type"] = Base.String(message_type)
+ end
+ end
+ begin
+ if descriptor_source !== nothing
+ desc["descriptor_source"] = Base.String(descriptor_source)
+ end
+ end
+ begin
+ if Tinput_types !== nothing
+ desc["Tinput_types"] = map(Base.identity, Tinput_types)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+ if tf.in_eager_mode()
+ encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types)
+ else
+ encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types)
+ end
+ end
+ end
+end
+
+
+"""
+ strided_slice_grad(shape, begin, end, strides, dy; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0)
+
+
+"""
+begin
+ begin
+ function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ local desc
+ tf.with_op_name(name, "StridedSliceGrad") do
+ desc = tf.NodeDescription("StridedSliceGrad")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ begin_ = convert(Tensor{Any}, begin_)
+ begin
+ begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
+ end
+ end
+ begin
+ end_ = convert(Tensor{Any}, end_)
+ begin
+ end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1)
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Any}, strides_)
+ begin
+ strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1)
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (dy_,) = tf.tf_promote(dy_)
+ end
+ begin
+ (shape_, begin_, end_, strides_) = tf.tf_promote(shape_, begin_, end_, strides_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ desc = tf.EagerOp("StridedSliceGrad")
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin_ = convert(tf.EagerTensor, begin_)
+ end_ = convert(tf.EagerTensor, end_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ begin
+ desc["Index"] = tf.data_type(shape_)
+ end
+ begin
+ desc["Index"] = tf.data_type(begin_)
+ end
+ begin
+ desc["Index"] = tf.data_type(end_)
+ end
+ begin
+ desc["Index"] = tf.data_type(strides_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ if tf.in_eager_mode()
+ strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ else
+ strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ end
+ end
+ end
+end
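+# Every op in this generated file follows the same three-function pattern seen
+# above: a `*_graph` builder that appends a `NodeDescription` to the current
+# graph, an `*_eager` variant that runs the kernel immediately through
+# `tf.execute` and records a `tf.TapeNode` for gradients, and a `tf.@op`
+# wrapper that picks between them via `tf.in_eager_mode()`. A minimal dispatch
+# sketch (the tensor arguments here are placeholders, not values defined in
+# this file):
+#
+#     enable_eager_execution()
+#     g = strided_slice_grad(shape_t, begin_t, end_t, strides_t, dy_t)  # takes the eager path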
+
+
+"""
+ _nccl_reduce_send(input)
+
+Replacement node for NcclReduce.
+"""
+begin
+ begin
+ function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "_NcclReduceSend") do
+ desc = tf.NodeDescription("_NcclReduceSend")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ desc = tf.EagerOp("_NcclReduceSend")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
+ else
+ _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ padded_batch_dataset(input_dataset, batch_size, padded_shapes, padding_values)
+
+
+"""
+begin
+ begin
+ function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "PaddedBatchDataset") do
+ desc = tf.NodeDescription("PaddedBatchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ begin
+ padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_]
+ begin
+ end
+ end
+ begin
+ padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, padded_shapes_)
+ end
+ begin
+ tf.add_input(desc, padding_values_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+ desc = tf.EagerOp("PaddedBatchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ # these two inputs are lists of tensors (one per dataset component), so
+ # convert them elementwise, matching the graph builder above
+ padded_shapes_ = [convert(tf.EagerTensor, x) for x = padded_shapes_]
+ padding_values_ = [convert(tf.EagerTensor, x) for x = padding_values_]
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, padded_shapes_)
+ end
+ begin
+ tf.add_input(desc, padding_values_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+ if tf.in_eager_mode()
+ padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N)
+ else
+ padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N)
+ end
+ end
+ end
+end
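+# `padded_batch_dataset` takes list-valued inputs: `padded_shapes` holds one
+# Int64 shape tensor per dataset component and `padding_values` one scalar per
+# component, which is why both are converted elementwise. An illustrative call
+# (names and values are placeholders) batching to length-10 vectors padded
+# with zeros:
+#
+#     padded_batch_dataset(ds, 32, [constant(Int64[10])], [constant(0.0)];
+#                          Toutput_types=[Float64], output_shapes=[[10]], N=1)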
+
+
+"""
+ data_format_vec_permute(x; src_format=, dst_format=)
+
+
+"""
+begin
+ begin
+ function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing)
+ local desc
+ tf.with_op_name(name, "DataFormatVecPermute") do
+ desc = tf.NodeDescription("DataFormatVecPermute")
+ begin
+ begin
+ x_ = convert(Tensor{Int32}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if src_format !== nothing
+ desc["src_format"] = Base.String(src_format)
+ end
+ end
+ begin
+ if dst_format !== nothing
+ desc["dst_format"] = Base.String(dst_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing)
+ desc = tf.EagerOp("DataFormatVecPermute")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if src_format !== nothing
+ desc["src_format"] = Base.String(src_format)
+ end
+ end
+ begin
+ if dst_format !== nothing
+ desc["dst_format"] = Base.String(dst_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing)
+ if tf.in_eager_mode()
+ data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format)
+ else
+ data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format)
+ end
+ end
+ end
+end
+
+
+"""
+ string_format(inputs; template=, placeholder=, summarize=3)
+
+
+"""
+begin
+ begin
+ function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+ local desc
+ tf.with_op_name(name, "StringFormat") do
+ desc = tf.NodeDescription("StringFormat")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if template !== nothing
+ desc["template"] = Base.String(template)
+ end
+ end
+ begin
+ if placeholder !== nothing
+ desc["placeholder"] = Base.String(placeholder)
+ end
+ end
+ begin
+ if summarize !== nothing
+ desc["summarize"] = Base.Int(summarize)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+ desc = tf.EagerOp("StringFormat")
+ # `inputs` is a list of tensors; convert elementwise as in the graph builder
+ inputs_ = [convert(tf.EagerTensor, x) for x = inputs_]
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if template !== nothing
+ desc["template"] = Base.String(template)
+ end
+ end
+ begin
+ if placeholder !== nothing
+ desc["placeholder"] = Base.String(placeholder)
+ end
+ end
+ begin
+ if summarize !== nothing
+ desc["summarize"] = Base.Int(summarize)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+ if tf.in_eager_mode()
+ string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize)
+ else
+ string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize)
+ end
+ end
+ end
+end
+
+
+"""
+ as_string(input; precision=-1, scientific=false, shortest=false, width=-1, fill=)
+
+
+"""
+begin
+ begin
+ function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+ local desc
+ tf.with_op_name(name, "AsString") do
+ desc = tf.NodeDescription("AsString")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if precision !== nothing
+ desc["precision"] = Base.Int(precision)
+ end
+ end
+ begin
+ if scientific !== nothing
+ desc["scientific"] = Base.Bool(scientific)
+ end
+ end
+ begin
+ if shortest !== nothing
+ desc["shortest"] = Base.Bool(shortest)
+ end
+ end
+ begin
+ if width !== nothing
+ desc["width"] = Base.Int(width)
+ end
+ end
+ begin
+ if fill !== nothing
+ desc["fill"] = Base.String(fill)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+ desc = tf.EagerOp("AsString")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if precision !== nothing
+ desc["precision"] = Base.Int(precision)
+ end
+ end
+ begin
+ if scientific !== nothing
+ desc["scientific"] = Base.Bool(scientific)
+ end
+ end
+ begin
+ if shortest !== nothing
+ desc["shortest"] = Base.Bool(shortest)
+ end
+ end
+ begin
+ if width !== nothing
+ desc["width"] = Base.Int(width)
+ end
+ end
+ begin
+ if fill !== nothing
+ desc["fill"] = Base.String(fill)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+ if tf.in_eager_mode()
+ as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill)
+ else
+ as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill)
+ end
+ end
+ end
+end
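+# `as_string` formats numeric tensors as strings; the keyword attributes map
+# directly onto the C-style formatting attrs of the underlying `AsString`
+# kernel. A small usage sketch (assumes eager mode):
+#
+#     as_string(constant([3.14159]); precision=2)     # "3.14"
+#     as_string(constant([1e-8]); scientific=true)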
+
+
+"""
+ queue_enqueue_many(handle, components; timeout_ms=-1)
+
+
+"""
+begin
+ begin
+ function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueEnqueueMany") do
+ desc = tf.NodeDescription("QueueEnqueueMany")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueEnqueueMany")
+ handle_ = convert(tf.EagerTensor, handle_)
+ # `components` is a list of tensors; convert elementwise as in the graph builder
+ components_ = [convert(tf.EagerTensor, x) for x = components_]
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ else
+ queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ fake_param()
+
+
+"""
+begin
+ begin
+ function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "FakeParam") do
+ desc = tf.NodeDescription("FakeParam")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fake_param_eager(; name=nothing, dtype=nothing, shape=nothing)
+ desc = tf.EagerOp("FakeParam")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_param(; name=nothing, dtype=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ fake_param_eager(; name=name, dtype=dtype, shape=shape)
+ else
+ fake_param_graph(; name=name, dtype=dtype, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_adagrad(var, accum, lr, grad; use_locking=false, update_slots=true)
+
+
+"""
+begin
+ begin
+ function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyAdagrad") do
+ desc = tf.NodeDescription("ApplyAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+ desc = tf.EagerOp("ApplyAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+ if tf.in_eager_mode()
+ apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots)
+ else
+ apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots)
+ end
+ end
+ end
+end
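+# `apply_adagrad` wraps TensorFlow's `ApplyAdagrad` kernel, which performs the
+# in-place update
+#
+#     accum .+= grad .^ 2                  # skipped when update_slots=false
+#     var   .-= lr .* grad ./ sqrt.(accum)
+#
+# `tf.tf_promote` unifies the element types of `var`, `accum`, `lr`, and
+# `grad`, so a single `T` attribute describes all four inputs.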
+
+
+"""
+ experimental_iterator_get_device(resource)
+
+
+"""
+begin
+ begin
+ function experimental_iterator_get_device_graph(resource_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalIteratorGetDevice") do
+ desc = tf.NodeDescription("ExperimentalIteratorGetDevice")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_iterator_get_device_eager(resource_; name=nothing)
+ desc = tf.EagerOp("ExperimentalIteratorGetDevice")
+ resource_ = convert(tf.EagerTensor, resource_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_iterator_get_device(resource_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_iterator_get_device_eager(resource_; name=name)
+ else
+ experimental_iterator_get_device_graph(resource_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ adjust_contrast(images, contrast_factor, min_value, max_value)
+
+
+"""
+begin
+ begin
+ function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AdjustContrast") do
+ desc = tf.NodeDescription("AdjustContrast")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
+ end
+ begin
+ contrast_factor_ = convert(Tensor{Float32}, contrast_factor_)
+ begin
+ end
+ end
+ begin
+ min_value_ = convert(Tensor{Float32}, min_value_)
+ begin
+ end
+ end
+ begin
+ max_value_ = convert(Tensor{Float32}, max_value_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, contrast_factor_)
+ end
+ begin
+ tf.add_input(desc, min_value_)
+ end
+ begin
+ tf.add_input(desc, max_value_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing)
+ desc = tf.EagerOp("AdjustContrast")
+ images_ = convert(tf.EagerTensor, images_)
+ contrast_factor_ = convert(tf.EagerTensor, contrast_factor_)
+ min_value_ = convert(tf.EagerTensor, min_value_)
+ max_value_ = convert(tf.EagerTensor, max_value_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, contrast_factor_)
+ end
+ begin
+ tf.add_input(desc, min_value_)
+ end
+ begin
+ tf.add_input(desc, max_value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing)
+ if tf.in_eager_mode()
+ adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name)
+ else
+ adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ optional_none()
+
+
+"""
+begin
+ begin
+ function optional_none_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "OptionalNone") do
+ desc = tf.NodeDescription("OptionalNone")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function optional_none_eager(; name=nothing)
+ desc = tf.EagerOp("OptionalNone")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(optional_none, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_none(; name=nothing)
+ if tf.in_eager_mode()
+ optional_none_eager(; name=name)
+ else
+ optional_none_graph(; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ extract_image_patches(images)
+
+
+"""
+begin
+ begin
+ function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "ExtractImagePatches") do
+ desc = tf.NodeDescription("ExtractImagePatches")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ end
+ begin
+ begin
+ if ksizes !== nothing
+ desc["ksizes"] = map(Base.identity, ksizes)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+ desc = tf.EagerOp("ExtractImagePatches")
+ images_ = convert(tf.EagerTensor, images_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ end
+ begin
+ begin
+ if ksizes !== nothing
+ desc["ksizes"] = map(Base.identity, ksizes)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding)
+ else
+ extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ variable_v2(; container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "VariableV2") do
+ desc = tf.NodeDescription("VariableV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function variable_v2_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("VariableV2")
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name)
+ else
+ variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ elu(features)
+
+
+"""
+begin
+ begin
+ function elu_graph(features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Elu") do
+ desc = tf.NodeDescription("Elu")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function elu_eager(features_; name=nothing)
+ desc = tf.EagerOp("Elu")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(elu, [features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu(features_; name=nothing)
+ if tf.in_eager_mode()
+ elu_eager(features_; name=name)
+ else
+ elu_graph(features_; name=name)
+ end
+ end
+ end
+end
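+# `Elu` computes the exponential linear unit, elementwise:
+#
+#     elu(x) = x            for x > 0
+#     elu(x) = exp(x) - 1   for x <= 0
+#
+# e.g. `elu(constant([-1.0, 2.0]))` evaluates to roughly `[-0.632, 2.0]` in
+# eager mode.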
+
+
+"""
+ scatter_update(ref, indices, updates; use_locking=true)
+
+
+"""
+begin
+ begin
+ function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterUpdate") do
+ desc = tf.NodeDescription("ScatterUpdate")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterUpdate")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ # mirror the graph builder: shift Julia's 1-based indices to TF's 0-based
+ indices_ = indices_ - 1
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
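+# Note that the graph builder above shifts `indices` by one
+# (`indices_ - convert(tf.Tensor{eltype(indices_)}, 1)`) so callers can use
+# Julia's 1-based indexing while the `ScatterUpdate` kernel sees 0-based rows.
+# A placeholder sketch of the intended semantics:
+#
+#     v = Variable([10.0, 20.0, 30.0])
+#     scatter_update(v, [1], [99.0])   # Julia index 1 -> first row becomes 99.0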
+
+
+"""
+ floor_mod(x, y)
+
+
+"""
+begin
+ begin
+ function floor_mod_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FloorMod") do
+ desc = tf.NodeDescription("FloorMod")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function floor_mod_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("FloorMod")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(floor_mod, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_mod(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ floor_mod_eager(x_, y_; name=name)
+ else
+ floor_mod_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
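+# `FloorMod` is the Python-style modulus: the result takes the sign of the
+# divisor, i.e.
+#
+#     floor_mod(x, y) == x - floor(x / y) * y
+#
+# so `floor_mod(constant(-7), constant(3))` yields `2`, unlike C-style `rem`.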
+
+
+"""
+ experimental_ignore_errors_dataset(input_dataset)
+
+
+"""
+begin
+ begin
+ function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do
+ desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_set_stats_aggregator_dataset(input_dataset, stats_aggregator, tag, counter_prefix)
+
+
+"""
+begin
+ begin
+ function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do
+ desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ stats_aggregator_ = convert(Tensor{Any}, stats_aggregator_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ counter_prefix_ = convert(Tensor{String}, counter_prefix_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, stats_aggregator_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, counter_prefix_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ stats_aggregator_ = convert(tf.EagerTensor, stats_aggregator_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ counter_prefix_ = convert(tf.EagerTensor, counter_prefix_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, stats_aggregator_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, counter_prefix_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ compute_accidental_hits(true_classes, sampled_candidates; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "ComputeAccidentalHits") do
+ desc = tf.NodeDescription("ComputeAccidentalHits")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ begin
+ sampled_candidates_ = convert(Tensor{Int64}, sampled_candidates_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ begin
+ tf.add_input(desc, sampled_candidates_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("ComputeAccidentalHits")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ sampled_candidates_ = convert(tf.EagerTensor, sampled_candidates_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ begin
+ tf.add_input(desc, sampled_candidates_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2)
+ else
+ compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
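+# `ComputeAccidentalHits` is one of the multi-output ops: the graph builder
+# returns a `Vector` of three tensors (indices, ids, weights) by indexing the
+# operation's outputs 1:3, and the eager variant correspondingly returns the
+# whole `res` collection instead of `res[1]`.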
+
+
+"""
+ string_to_number(string_tensor; out_type=Float32)
+
+
+"""
+begin
+ begin
+ function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "StringToNumber") do
+ desc = tf.NodeDescription("StringToNumber")
+ begin
+ begin
+ string_tensor_ = convert(Tensor{String}, string_tensor_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, string_tensor_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("StringToNumber")
+ string_tensor_ = convert(tf.EagerTensor, string_tensor_)
+ begin
+ begin
+ tf.add_input(desc, string_tensor_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_number(string_tensor_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ string_to_number_eager(string_tensor_; name=name, out_type=out_type)
+ else
+ string_to_number_graph(string_tensor_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
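+# Usage sketch for `string_to_number` (eager mode; `out_type` defaults to
+# Float32 per the docstring above):
+#
+#     string_to_number(constant(["3.5", "-1"]))              # Float32[3.5, -1.0]
+#     string_to_number(constant(["42"]); out_type=Int32)     # Int32[42]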
+
+
+"""
+ snapshot(input)
+
+
+"""
+begin
+ begin
+ function snapshot_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Snapshot") do
+ desc = tf.NodeDescription("Snapshot")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function snapshot_eager(input_; name=nothing)
+ desc = tf.EagerOp("Snapshot")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(snapshot, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function snapshot(input_; name=nothing)
+ if tf.in_eager_mode()
+ snapshot_eager(input_; name=name)
+ else
+ snapshot_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ deserialize_iterator(resource_handle, serialized)
+
+
+"""
+begin
+ begin
+ function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DeserializeIterator") do
+ desc = tf.NodeDescription("DeserializeIterator")
+ begin
+ begin
+ resource_handle_ = convert(Tensor{Any}, resource_handle_)
+ begin
+ end
+ end
+ begin
+ serialized_ = convert(Tensor{Any}, serialized_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_handle_)
+ end
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing)
+ desc = tf.EagerOp("DeserializeIterator")
+ resource_handle_ = convert(tf.EagerTensor, resource_handle_)
+ serialized_ = convert(tf.EagerTensor, serialized_)
+ begin
+ begin
+ tf.add_input(desc, resource_handle_)
+ end
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_iterator(resource_handle_, serialized_; name=nothing)
+ if tf.in_eager_mode()
+ deserialize_iterator_eager(resource_handle_, serialized_; name=name)
+ else
+ deserialize_iterator_graph(resource_handle_, serialized_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ atan(x)
+
+
+"""
+begin
+ begin
+ function atan_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Atan") do
+ desc = tf.NodeDescription("Atan")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function atan_eager(x_; name=nothing)
+ desc = tf.EagerOp("Atan")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(atan, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan(x_; name=nothing)
+ if tf.in_eager_mode()
+ atan_eager(x_; name=name)
+ else
+ atan_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ mat_mul(a, b; transpose_a=false, transpose_b=false)
+
+
+"""
+begin
+ begin
+ function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+ local desc
+ tf.with_op_name(name, "MatMul") do
+ desc = tf.NodeDescription("MatMul")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ b_ = convert(Tensor{Any}, b_)
+ begin
+ end
+ end
+ begin
+ (a_, b_) = tf.tf_promote(a_, b_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ begin
+ if transpose_a !== nothing
+ desc["transpose_a"] = Base.Bool(transpose_a)
+ end
+ end
+ begin
+ if transpose_b !== nothing
+ desc["transpose_b"] = Base.Bool(transpose_b)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+ desc = tf.EagerOp("MatMul")
+ a_ = convert(tf.EagerTensor, a_)
+ b_ = convert(tf.EagerTensor, b_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ begin
+ if transpose_a !== nothing
+ desc["transpose_a"] = Base.Bool(transpose_a)
+ end
+ end
+ begin
+ if transpose_b !== nothing
+ desc["transpose_b"] = Base.Bool(transpose_b)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+ if tf.in_eager_mode()
+ mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+ else
+ mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+ end
+ end
+ end
+end
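+# `mat_mul` lowers to the 2-D `MatMul` kernel; the transpose flags avoid
+# materialising an explicit transpose. Sketch (eager mode, placeholder data):
+#
+#     a = constant(randn(2, 3)); b = constant(randn(3, 4))
+#     mat_mul(a, b)                                      # 2x4
+#     mat_mul(b, a; transpose_a=true, transpose_b=true)  # 4x2, equal to (a*b)'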
+
+
+"""
+ erfc(x)
+
+
+"""
+begin
+ begin
+ function erfc_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Erfc") do
+ desc = tf.NodeDescription("Erfc")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function erfc_eager(x_; name=nothing)
+ desc = tf.EagerOp("Erfc")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(erfc, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erfc(x_; name=nothing)
+ if tf.in_eager_mode()
+ erfc_eager(x_; name=name)
+ else
+ erfc_graph(x_; name=name)
+ end
+ end
+ end
+end
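+# `Erfc` is the complementary error function, applied elementwise:
+#
+#     erfc(x) = 1 - erf(x) = (2 / sqrt(pi)) * integral_{x}^{Inf} exp(-t^2) dt
+#
+# and matches `SpecialFunctions.erfc` evaluated on the host.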
+
+
+"""
+ sigmoid_grad(y, dy)
+
+
+"""
+begin
+ begin
+ function sigmoid_grad_graph(y_, dy_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SigmoidGrad") do
+ desc = tf.NodeDescription("SigmoidGrad")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (y_, dy_) = tf.tf_promote(y_, dy_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sigmoid_grad_eager(y_, dy_; name=nothing)
+ desc = tf.EagerOp("SigmoidGrad")
+ y_ = convert(tf.EagerTensor, y_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid_grad(y_, dy_; name=nothing)
+ if tf.in_eager_mode()
+ sigmoid_grad_eager(y_, dy_; name=name)
+ else
+ sigmoid_grad_graph(y_, dy_; name=name)
+ end
+ end
+ end
+end
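+# `SigmoidGrad` computes the backward pass of the logistic sigmoid given the
+# *forward output* `y` rather than the input:
+#
+#     sigmoid_grad(y, dy) == dy .* y .* (1 .- y)
+#
+# so taking `y` avoids recomputing the forward pass during backprop.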
+
+
+"""
+ fixed_length_record_reader_v2(; header_bytes=0, footer_bytes=0, hop_bytes=0, container=, shared_name=, encoding=)
+
+
+"""
+begin
+ begin
+ function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+ local desc
+ tf.with_op_name(name, "FixedLengthRecordReaderV2") do
+ desc = tf.NodeDescription("FixedLengthRecordReaderV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if header_bytes !== nothing
+ desc["header_bytes"] = Base.Int(header_bytes)
+ end
+ end
+ begin
+ if record_bytes !== nothing
+ desc["record_bytes"] = Base.Int(record_bytes)
+ end
+ end
+ begin
+ if footer_bytes !== nothing
+ desc["footer_bytes"] = Base.Int(footer_bytes)
+ end
+ end
+ begin
+ if hop_bytes !== nothing
+ desc["hop_bytes"] = Base.Int(hop_bytes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if encoding !== nothing
+ desc["encoding"] = Base.String(encoding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fixed_length_record_reader_v2_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+ desc = tf.EagerOp("FixedLengthRecordReaderV2")
+ begin
+ end
+ begin
+ begin
+ if header_bytes !== nothing
+ desc["header_bytes"] = Base.Int(header_bytes)
+ end
+ end
+ begin
+ if record_bytes !== nothing
+ desc["record_bytes"] = Base.Int(record_bytes)
+ end
+ end
+ begin
+ if footer_bytes !== nothing
+ desc["footer_bytes"] = Base.Int(footer_bytes)
+ end
+ end
+ begin
+ if hop_bytes !== nothing
+ desc["hop_bytes"] = Base.Int(hop_bytes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if encoding !== nothing
+ desc["encoding"] = Base.String(encoding)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+ if tf.in_eager_mode()
+ fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding)
+ else
+ fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding)
+ end
+ end
+ end
+end
+
+
+"""
+ non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold)
+
+
+"""
+begin
+ begin
+ function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing)
+ local desc
+ tf.with_op_name(name, "NonMaxSuppressionV3") do
+ desc = tf.NodeDescription("NonMaxSuppressionV3")
+ begin
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ scores_ = convert(Tensor{Float32}, scores_)
+ begin
+ end
+ end
+ begin
+ max_output_size_ = convert(Tensor{Int32}, max_output_size_)
+ begin
+ end
+ end
+ begin
+ iou_threshold_ = convert(Tensor{Float32}, iou_threshold_)
+ begin
+ end
+ end
+ begin
+ score_threshold_ = convert(Tensor{Float32}, score_threshold_)
+ begin
+ end
+ end
+ begin
+ (boxes_, scores_) = tf.tf_promote(boxes_, scores_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, iou_threshold_)
+ end
+ begin
+ tf.add_input(desc, score_threshold_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing)
+ desc = tf.EagerOp("NonMaxSuppressionV3")
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ scores_ = convert(tf.EagerTensor, scores_)
+ max_output_size_ = convert(tf.EagerTensor, max_output_size_)
+ iou_threshold_ = convert(tf.EagerTensor, iou_threshold_)
+ score_threshold_ = convert(tf.EagerTensor, score_threshold_)
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, iou_threshold_)
+ end
+ begin
+ tf.add_input(desc, score_threshold_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(boxes_)
+ end
+ begin
+ desc["T"] = tf.data_type(scores_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing)
+ if tf.in_eager_mode()
+ non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name)
+ else
+ non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name)
+ end
+ end
+ end
+end
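+# `NonMaxSuppressionV3` greedily selects boxes in decreasing score order,
+# discarding any box whose IoU with an already-selected box exceeds
+# `iou_threshold` and any box scoring below `score_threshold`; it returns at
+# most `max_output_size` indices into `boxes`.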
+
+
+"""
+ dilation2d_backprop_input(input, filter, out_backprop)
+
+
+"""
+begin
+ begin
+ function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "Dilation2DBackpropInput") do
+ desc = tf.NodeDescription("Dilation2DBackpropInput")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ desc = tf.EagerOp("Dilation2DBackpropInput")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding)
+ else
+ dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAdadelta") do
+ desc = tf.NodeDescription("ResourceApplyAdadelta")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ accum_update_ = convert(Tensor{Any}, accum_update_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyAdadelta")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ accum_update_ = convert(tf.EagerTensor, accum_update_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
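+
+# For reference, the Adadelta update this op applies (following the
+# TensorFlow op documentation):
+#
+#     accum        = rho * accum + (1 - rho) * grad^2
+#     update       = sqrt(accum_update + epsilon) / sqrt(accum + epsilon) * grad
+#     accum_update = rho * accum_update + (1 - rho) * update^2
+#     var         -= lr * update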
+
+
+"""
+ logical_or(x, y)
+
+
+"""
+begin
+ begin
+ function logical_or_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LogicalOr") do
+ desc = tf.NodeDescription("LogicalOr")
+ begin
+ begin
+ x_ = convert(Tensor{Bool}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Bool}, y_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function logical_or_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("LogicalOr")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(logical_or, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_or(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ logical_or_eager(x_, y_; name=name)
+ else
+ logical_or_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
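+
+# Eager-mode sketch (illustrative; assumes eager execution is enabled):
+#
+#     x = constant([true, false])
+#     y = constant([true, true])
+#     logical_or(x, y)  # element-wise OR -> [true, true]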
+
+
+"""
+ dense_to_sparse_set_operation(set1, set2_indices, set2_values, set2_shape; validate_indices=true)
+
+
+"""
+begin
+ begin
+ function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ local desc
+ tf.with_op_name(name, "DenseToSparseSetOperation") do
+ desc = tf.NodeDescription("DenseToSparseSetOperation")
+ begin
+ begin
+ set1_ = convert(Tensor{Any}, set1_)
+ begin
+ end
+ end
+ begin
+ set2_indices_ = convert(Tensor{Int64}, set2_indices_)
+ begin
+ end
+ end
+ begin
+ set2_values_ = convert(Tensor{Any}, set2_values_)
+ begin
+ end
+ end
+ begin
+ set2_shape_ = convert(Tensor{Int64}, set2_shape_)
+ begin
+ end
+ end
+ begin
+ (set1_, set2_values_) = tf.tf_promote(set1_, set2_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, set1_)
+ end
+ begin
+ tf.add_input(desc, set2_indices_)
+ end
+ begin
+ tf.add_input(desc, set2_values_)
+ end
+ begin
+ tf.add_input(desc, set2_shape_)
+ end
+ end
+ begin
+ begin
+ if set_operation !== nothing
+ desc["set_operation"] = Base.String(set_operation)
+ end
+ end
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ desc = tf.EagerOp("DenseToSparseSetOperation")
+ set1_ = convert(tf.EagerTensor, set1_)
+ set2_indices_ = convert(tf.EagerTensor, set2_indices_)
+ set2_values_ = convert(tf.EagerTensor, set2_values_)
+ set2_shape_ = convert(tf.EagerTensor, set2_shape_)
+ begin
+ begin
+ tf.add_input(desc, set1_)
+ end
+ begin
+ tf.add_input(desc, set2_indices_)
+ end
+ begin
+ tf.add_input(desc, set2_values_)
+ end
+ begin
+ tf.add_input(desc, set2_shape_)
+ end
+ end
+ begin
+ begin
+ if set_operation !== nothing
+ desc["set_operation"] = Base.String(set_operation)
+ end
+ end
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(set1_)
+ end
+ begin
+ desc["T"] = tf.data_type(set2_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ if tf.in_eager_mode()
+ dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices)
+ else
+ dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_num_records_produced(reader_handle)
+
+
+"""
+begin
+ begin
+ function reader_num_records_produced_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderNumRecordsProduced") do
+ desc = tf.NodeDescription("ReaderNumRecordsProduced")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_num_records_produced_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderNumRecordsProduced")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_num_records_produced_eager(reader_handle_; name=name)
+ else
+ reader_num_records_produced_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ adjust_hue(images, delta)
+
+
+"""
+begin
+ begin
+ function adjust_hue_graph(images_, delta_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AdjustHue") do
+ desc = tf.NodeDescription("AdjustHue")
+ begin
+ begin
+ images_ = convert(Tensor{Float32}, images_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Float32}, delta_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function adjust_hue_eager(images_, delta_; name=nothing)
+ desc = tf.EagerOp("AdjustHue")
+ images_ = convert(tf.EagerTensor, images_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_hue(images_, delta_; name=nothing)
+ if tf.in_eager_mode()
+ adjust_hue_eager(images_, delta_; name=name)
+ else
+ adjust_hue_graph(images_, delta_; name=name)
+ end
+ end
+ end
+end
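+
+# Sketch (illustrative): the image is converted to HSV, `delta` is added to
+# the hue channel, and the result is converted back to RGB.
+#
+#     img = constant(rand(Float32, 8, 8, 3))  # HWC, values in [0, 1]
+#     adjust_hue(img, 0.2f0)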
+
+
+"""
+ boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle, num_buckets; generate_quantiles=false)
+
+
+"""
+begin
+ begin
+ function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do
+ desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush")
+ begin
+ begin
+ quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_)
+ begin
+ end
+ end
+ begin
+ num_buckets_ = convert(Tensor{Int64}, num_buckets_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, num_buckets_)
+ end
+ end
+ begin
+ begin
+ if generate_quantiles !== nothing
+ desc["generate_quantiles"] = Base.Bool(generate_quantiles)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing)
+ desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush")
+ quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+ num_buckets_ = convert(tf.EagerTensor, num_buckets_)
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, num_buckets_)
+ end
+ end
+ begin
+ begin
+ if generate_quantiles !== nothing
+ desc["generate_quantiles"] = Base.Bool(generate_quantiles)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles)
+ else
+ boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_map_and_batch_dataset(input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder; preserve_cardinality=false)
+
+
+"""
+begin
+ begin
+ function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do
+ desc = tf.NodeDescription("ExperimentalMapAndBatchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ begin
+ num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_)
+ begin
+ end
+ end
+ begin
+ drop_remainder_ = convert(Tensor{Bool}, drop_remainder_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ desc = tf.EagerOp("ExperimentalMapAndBatchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_)
+ drop_remainder_ = convert(tf.EagerTensor, drop_remainder_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ if tf.in_eager_mode()
+ experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality)
+ else
+ experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality)
+ end
+ end
+ end
+end
+
+
+"""
+ real_div(x, y)
+
+
+"""
+begin
+ begin
+ function real_div_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RealDiv") do
+ desc = tf.NodeDescription("RealDiv")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function real_div_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("RealDiv")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(real_div, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real_div(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ real_div_eager(x_, y_; name=name)
+ else
+ real_div_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
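+
+# Sketch (illustrative): element-wise division with the usual broadcasting.
+#
+#     real_div(constant([1.0, 2.0]), constant([4.0, 4.0]))  # -> [0.25, 0.5]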
+
+
+"""
+ restore_slice(file_pattern, tensor_name, shape_and_slice; preferred_shard=-1)
+
+
+"""
+begin
+ begin
+ function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing)
+ local desc
+ tf.with_op_name(name, "RestoreSlice") do
+ desc = tf.NodeDescription("RestoreSlice")
+ begin
+ begin
+ file_pattern_ = convert(Tensor{String}, file_pattern_)
+ begin
+ end
+ end
+ begin
+ tensor_name_ = convert(Tensor{String}, tensor_name_)
+ begin
+ end
+ end
+ begin
+ shape_and_slice_ = convert(Tensor{String}, shape_and_slice_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, file_pattern_)
+ end
+ begin
+ tf.add_input(desc, tensor_name_)
+ end
+ begin
+ tf.add_input(desc, shape_and_slice_)
+ end
+ end
+ begin
+ begin
+ if dt !== nothing
+ desc["dt"] = Base.identity(dt)
+ end
+ end
+ begin
+ if preferred_shard !== nothing
+ desc["preferred_shard"] = Base.Int(preferred_shard)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing)
+ desc = tf.EagerOp("RestoreSlice")
+ file_pattern_ = convert(tf.EagerTensor, file_pattern_)
+ tensor_name_ = convert(tf.EagerTensor, tensor_name_)
+ shape_and_slice_ = convert(tf.EagerTensor, shape_and_slice_)
+ begin
+ begin
+ tf.add_input(desc, file_pattern_)
+ end
+ begin
+ tf.add_input(desc, tensor_name_)
+ end
+ begin
+ tf.add_input(desc, shape_and_slice_)
+ end
+ end
+ begin
+ begin
+ if dt !== nothing
+ desc["dt"] = Base.identity(dt)
+ end
+ end
+ begin
+ if preferred_shard !== nothing
+ desc["preferred_shard"] = Base.Int(preferred_shard)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing)
+ if tf.in_eager_mode()
+ restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard)
+ else
+ restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard)
+ end
+ end
+ end
+end
+
+
+"""
+ stack_pop_v2(handle)
+
+
+"""
+begin
+ begin
+ function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing)
+ local desc
+ tf.with_op_name(name, "StackPopV2") do
+ desc = tf.NodeDescription("StackPopV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing)
+ desc = tf.EagerOp("StackPopV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop_v2(handle_; name=nothing, elem_type=nothing)
+ if tf.in_eager_mode()
+ stack_pop_v2_eager(handle_; name=name, elem_type=elem_type)
+ else
+ stack_pop_v2_graph(handle_; name=name, elem_type=elem_type)
+ end
+ end
+ end
+end
+
+
+"""
+ reverse(tensor, dims)
+
+
+"""
+begin
+ begin
+ function reverse_graph(tensor_, dims_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Reverse") do
+ desc = tf.NodeDescription("Reverse")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ dims_ = convert(Tensor{Bool}, dims_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, dims_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reverse_eager(tensor_, dims_; name=nothing)
+ desc = tf.EagerOp("Reverse")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ dims_ = convert(tf.EagerTensor, dims_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, dims_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse(tensor_, dims_; name=nothing)
+ if tf.in_eager_mode()
+ reverse_eager(tensor_, dims_; name=name)
+ else
+ reverse_graph(tensor_, dims_; name=name)
+ end
+ end
+ end
+end
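+
+# Sketch (illustrative): note that `dims` is a `Bool` mask with one entry per
+# dimension, not a list of dimension indices.
+#
+#     m = constant([1 2; 3 4])
+#     reverse(m, constant([true, false]))  # reverse dim 1 -> [3 4; 1 2]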
+
+
+"""
+ decode_png(contents; channels=0, dtype=UInt8)
+
+
+"""
+begin
+ begin
+ function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "DecodePng") do
+ desc = tf.NodeDescription("DecodePng")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing)
+ desc = tf.EagerOp("DecodePng")
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ decode_png_eager(contents_; name=name, channels=channels, dtype=dtype)
+ else
+ decode_png_graph(contents_; name=name, channels=channels, dtype=dtype)
+ end
+ end
+ end
+end
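+
+# Sketch (illustrative; assumes a `read_file` op wrapper is available for
+# loading the raw bytes):
+#
+#     contents = read_file("image.png")
+#     decode_png(contents; channels=3)  # force a 3-channel (RGB) result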
+
+
+"""
+ non_max_suppression_v2(boxes, scores, max_output_size, iou_threshold)
+
+
+"""
+begin
+ begin
+ function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing)
+ local desc
+ tf.with_op_name(name, "NonMaxSuppressionV2") do
+ desc = tf.NodeDescription("NonMaxSuppressionV2")
+ begin
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ scores_ = convert(Tensor{Float32}, scores_)
+ begin
+ end
+ end
+ begin
+ max_output_size_ = convert(Tensor{Int32}, max_output_size_)
+ begin
+ end
+ end
+ begin
+ iou_threshold_ = convert(Tensor{Float32}, iou_threshold_)
+ begin
+ end
+ end
+ begin
+ (boxes_, scores_) = tf.tf_promote(boxes_, scores_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, iou_threshold_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing)
+ desc = tf.EagerOp("NonMaxSuppressionV2")
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ scores_ = convert(tf.EagerTensor, scores_)
+ max_output_size_ = convert(tf.EagerTensor, max_output_size_)
+ iou_threshold_ = convert(tf.EagerTensor, iou_threshold_)
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, iou_threshold_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(boxes_)
+ end
+ begin
+ desc["T"] = tf.data_type(scores_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing)
+ if tf.in_eager_mode()
+ non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name)
+ else
+ non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ igamma(a, x)
+
+
+"""
+begin
+ begin
+ function igamma_graph(a_, x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Igamma") do
+ desc = tf.NodeDescription("Igamma")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (a_, x_) = tf.tf_promote(a_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function igamma_eager(a_, x_; name=nothing)
+ desc = tf.EagerOp("Igamma")
+ a_ = convert(tf.EagerTensor, a_)
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(igamma, [a_, x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma(a_, x_; name=nothing)
+ if tf.in_eager_mode()
+ igamma_eager(a_, x_; name=name)
+ else
+ igamma_graph(a_, x_; name=name)
+ end
+ end
+ end
+end
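+
+# For reference: `igamma` is the lower regularized incomplete gamma function
+# P(a, x), so P(1, x) = 1 - exp(-x):
+#
+#     igamma(constant(1.0), constant(1.0))  # ≈ 1 - exp(-1) ≈ 0.632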
+
+
+"""
+ digamma(x)
+
+
+"""
+begin
+ begin
+ function digamma_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Digamma") do
+ desc = tf.NodeDescription("Digamma")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function digamma_eager(x_; name=nothing)
+ desc = tf.EagerOp("Digamma")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(digamma, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function digamma(x_; name=nothing)
+ if tf.in_eager_mode()
+ digamma_eager(x_; name=name)
+ else
+ digamma_graph(x_; name=name)
+ end
+ end
+ end
+end
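+
+# For reference: digamma(x) = d/dx log Γ(x), and digamma(1) equals minus the
+# Euler-Mascheroni constant:
+#
+#     digamma(constant(1.0))  # ≈ -0.5772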
+
+
+"""
+ resource_apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAdaMax") do
+ desc = tf.NodeDescription("ResourceApplyAdaMax")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ beta1_power_ = convert(Tensor{Any}, beta1_power_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ beta1_ = convert(Tensor{Any}, beta1_)
+ begin
+ end
+ end
+ begin
+ beta2_ = convert(Tensor{Any}, beta2_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyAdaMax")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ beta1_power_ = convert(tf.EagerTensor, beta1_power_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ beta1_ = convert(tf.EagerTensor, beta1_)
+ beta2_ = convert(tf.EagerTensor, beta2_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
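+
+# For reference, the AdaMax update this op applies (following the TensorFlow
+# op documentation):
+#
+#     m   = beta1 * m + (1 - beta1) * grad
+#     v   = max(beta2 * v, abs(grad))
+#     var = var - (lr / (1 - beta1_power)) * m / (v + epsilon)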
+
+
+"""
+ space_to_depth(input; data_format=)
+
+
+"""
+begin
+ begin
+ function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "SpaceToDepth") do
+ desc = tf.NodeDescription("SpaceToDepth")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing)
+ desc = tf.EagerOp("SpaceToDepth")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format)
+ else
+ space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format)
+ end
+ end
+ end
+end
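+
+# Sketch (illustrative): with `data_format="NHWC"`, an input of shape
+# (N, H, W, C) becomes (N, H/block_size, W/block_size, C*block_size^2).
+#
+#     x = constant(rand(Float32, 1, 4, 4, 1))
+#     space_to_depth(x; block_size=2)  # shape (1, 2, 2, 4)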
+
+
+"""
+ sqrt_grad(y, dy)
+
+
+"""
+begin
+ begin
+ function sqrt_grad_graph(y_, dy_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SqrtGrad") do
+ desc = tf.NodeDescription("SqrtGrad")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (y_, dy_) = tf.tf_promote(y_, dy_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sqrt_grad_eager(y_, dy_; name=nothing)
+ desc = tf.EagerOp("SqrtGrad")
+ y_ = convert(tf.EagerTensor, y_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt_grad(y_, dy_; name=nothing)
+ if tf.in_eager_mode()
+ sqrt_grad_eager(y_, dy_; name=name)
+ else
+ sqrt_grad_graph(y_, dy_; name=name)
+ end
+ end
+ end
+end
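+
+# For reference: given y = sqrt(x) and the upstream gradient dy, SqrtGrad
+# computes dy * 0.5 / y.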
+
+
+"""
+ map_unstage(key, indices; capacity=0, memory_limit=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapUnstage") do
+ desc = tf.NodeDescription("MapUnstage")
+ begin
+ begin
+ key_ = convert(Tensor{Int64}, key_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapUnstage")
+ key_ = convert(tf.EagerTensor, key_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ qr(input; full_matrices=false)
+
+
+"""
+begin
+ begin
+ function qr_graph(input_; name=nothing, full_matrices=nothing)
+ local desc
+ tf.with_op_name(name, "Qr") do
+ desc = tf.NodeDescription("Qr")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if full_matrices !== nothing
+ desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function qr_eager(input_; name=nothing, full_matrices=nothing)
+ desc = tf.EagerOp("Qr")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if full_matrices !== nothing
+ desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function qr(input_; name=nothing, full_matrices=nothing)
+ if tf.in_eager_mode()
+ qr_eager(input_; name=name, full_matrices=full_matrices)
+ else
+ qr_graph(input_; name=name, full_matrices=full_matrices)
+ end
+ end
+ end
+end
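+
+# Sketch (illustrative): returns the pair (q, r) with input ≈ q * r; with
+# full_matrices=false (the TensorFlow default) the factors have reduced shape.
+#
+#     a = constant(rand(4, 3))
+#     q, r = qr(a)  # q: 4x3, r: 3x3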
+
+
+"""
+ boosted_trees_calculate_best_gains_per_feature(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight)
+
+
+"""
+begin
+ begin
+ function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do
+ desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature")
+ begin
+ begin
+ node_id_range_ = convert(Tensor{Int32}, node_id_range_)
+ begin
+ end
+ end
+ begin
+ stats_summary_list_ = [convert(Tensor{Float32}, x) for x = stats_summary_list_]
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Float32}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Float32}, l2_)
+ begin
+ end
+ end
+ begin
+ tree_complexity_ = convert(Tensor{Float32}, tree_complexity_)
+ begin
+ end
+ end
+ begin
+ min_node_weight_ = convert(Tensor{Float32}, min_node_weight_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, node_id_range_)
+ end
+ begin
+ tf.add_input(desc, stats_summary_list_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, tree_complexity_)
+ end
+ begin
+ tf.add_input(desc, min_node_weight_)
+ end
+ end
+ begin
+ begin
+ if max_splits !== nothing
+ desc["max_splits"] = Base.Int(max_splits)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature")
+ node_id_range_ = convert(tf.EagerTensor, node_id_range_)
+ stats_summary_list_ = convert(tf.EagerTensor, stats_summary_list_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ tree_complexity_ = convert(tf.EagerTensor, tree_complexity_)
+ min_node_weight_ = convert(tf.EagerTensor, min_node_weight_)
+ begin
+ begin
+ tf.add_input(desc, node_id_range_)
+ end
+ begin
+ tf.add_input(desc, stats_summary_list_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, tree_complexity_)
+ end
+ begin
+ tf.add_input(desc, min_node_weight_)
+ end
+ end
+ begin
+ begin
+ if max_splits !== nothing
+ desc["max_splits"] = Base.Int(max_splits)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, [node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features)
+ else
+ boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features)
+ end
+ end
+ end
+end
+
+
+"""
+ unbatch_grad(original_input, batch_index, grad, id; container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "UnbatchGrad") do
+ desc = tf.NodeDescription("UnbatchGrad")
+ begin
+ begin
+ original_input_ = convert(Tensor{Any}, original_input_)
+ begin
+ end
+ end
+ begin
+ batch_index_ = convert(Tensor{Int64}, batch_index_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ id_ = convert(Tensor{Int64}, id_)
+ begin
+ end
+ end
+ begin
+ (original_input_, grad_) = tf.tf_promote(original_input_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, original_input_)
+ end
+ begin
+ tf.add_input(desc, batch_index_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, id_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("UnbatchGrad")
+ original_input_ = convert(tf.EagerTensor, original_input_)
+ batch_index_ = convert(tf.EagerTensor, batch_index_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ id_ = convert(tf.EagerTensor, id_)
+ begin
+ begin
+ tf.add_input(desc, original_input_)
+ end
+ begin
+ tf.add_input(desc, batch_index_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, id_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(original_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name)
+ else
+ unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ log_softmax(logits)
+
+
+"""
+begin
+ begin
+ function log_softmax_graph(logits_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LogSoftmax") do
+ desc = tf.NodeDescription("LogSoftmax")
+ begin
+ begin
+ logits_ = convert(Tensor{Any}, logits_)
+ begin
+ end
+ end
+ begin
+ (logits_,) = tf.tf_promote(logits_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function log_softmax_eager(logits_; name=nothing)
+ desc = tf.EagerOp("LogSoftmax")
+ logits_ = convert(tf.EagerTensor, logits_)
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(logits_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(log_softmax, [logits_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_softmax(logits_; name=nothing)
+ if tf.in_eager_mode()
+ log_softmax_eager(logits_; name=name)
+ else
+ log_softmax_graph(logits_; name=name)
+ end
+ end
+ end
+end
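+
+# For reference: logsoftmax[i] = logits[i] - log(sum(exp(logits))), computed
+# along the last dimension.
+#
+#     log_softmax(constant([1.0, 2.0, 3.0]))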
+
+
+"""
+ resource_count_up_to(resource)
+
+
+"""
+begin
+ begin
+ function resource_count_up_to_graph(resource_; name=nothing, limit=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceCountUpTo") do
+ desc = tf.NodeDescription("ResourceCountUpTo")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ begin
+ if limit !== nothing
+ desc["limit"] = Base.Int(limit)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_count_up_to_eager(resource_; name=nothing, limit=nothing)
+ desc = tf.EagerOp("ResourceCountUpTo")
+ resource_ = convert(tf.EagerTensor, resource_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ begin
+ if limit !== nothing
+ desc["limit"] = Base.Int(limit)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_count_up_to(resource_; name=nothing, limit=nothing)
+ if tf.in_eager_mode()
+ resource_count_up_to_eager(resource_; name=name, limit=limit)
+ else
+ resource_count_up_to_graph(resource_; name=name, limit=limit)
+ end
+ end
+ end
+end
+
+
+"""
+ accumulate_nv2(inputs)
+
+
+"""
+begin
+ begin
+ function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "AccumulateNV2") do
+ desc = tf.NodeDescription("AccumulateNV2")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ (inputs_,) = tf.tf_promote(inputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing)
+ desc = tf.EagerOp("AccumulateNV2")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(inputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape)
+ else
+ accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ parallel_map_dataset(input_dataset, other_arguments, num_parallel_calls; use_inter_op_parallelism=true, sloppy=false, preserve_cardinality=false)
+
+
+"""
+begin
+ begin
+ function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing)
+ local desc
+ tf.with_op_name(name, "ParallelMapDataset") do
+ desc = tf.NodeDescription("ParallelMapDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ begin
+ num_parallel_calls_ = convert(Tensor{Int32}, num_parallel_calls_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ begin
+ if sloppy !== nothing
+ desc["sloppy"] = Base.Bool(sloppy)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing)
+ desc = tf.EagerOp("ParallelMapDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ begin
+ if sloppy !== nothing
+ desc["sloppy"] = Base.Bool(sloppy)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], res; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing)
+ if tf.in_eager_mode()
+ parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality)
+ else
+ parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality)
+ end
+ end
+ end
+end
+
+
+"""
+ random_uniform(shape; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "RandomUniform") do
+ desc = tf.NodeDescription("RandomUniform")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ desc = tf.EagerOp("RandomUniform")
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(random_uniform, [shape_], res; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ else
+ random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ end
+ end
+ end
+end
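+
+# A minimal usage sketch (values illustrative; assumes eager mode and access
+# via `Ops`). `shape` is a 1-D integer tensor and `dtype` picks the element
+# type of the output:
+#
+#   shape = constant(Int32[2, 3])
+#   x = Ops.random_uniform(shape; dtype=Float32, seed=42)  # 2x3 draws from U[0, 1)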
+
+
+"""
+ unicode_transcode(input; errors=, replacement_char=65533, replace_control_characters=false)
+
+
+"""
+begin
+ begin
+ function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ local desc
+ tf.with_op_name(name, "UnicodeTranscode") do
+ desc = tf.NodeDescription("UnicodeTranscode")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if input_encoding !== nothing
+ desc["input_encoding"] = Base.String(input_encoding)
+ end
+ end
+ begin
+ if output_encoding !== nothing
+ desc["output_encoding"] = Base.String(output_encoding)
+ end
+ end
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ begin
+ if replace_control_characters !== nothing
+ desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ desc = tf.EagerOp("UnicodeTranscode")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if input_encoding !== nothing
+ desc["input_encoding"] = Base.String(input_encoding)
+ end
+ end
+ begin
+ if output_encoding !== nothing
+ desc["output_encoding"] = Base.String(output_encoding)
+ end
+ end
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ begin
+ if replace_control_characters !== nothing
+ desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(unicode_transcode, [input_], res; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ if tf.in_eager_mode()
+ unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ else
+ unicode_transcode_graph(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ end
+ end
+ end
+end
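+
+# Sketch (illustrative): transcoding a UTF-8 string tensor to UTF-16-BE.
+# With `errors="replace"`, malformed input is substituted with
+# `replacement_char` instead of raising an error:
+#
+#   s = constant("héllo")
+#   Ops.unicode_transcode(s; input_encoding="UTF-8", output_encoding="UTF-16-BE")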
+
+
+"""
+ reader_reset(reader_handle)
+
+
+"""
+begin
+ begin
+ function reader_reset_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderReset") do
+ desc = tf.NodeDescription("ReaderReset")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_reset_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderReset")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(reader_reset, [reader_handle_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_reset_eager(reader_handle_; name=name)
+ else
+ reader_reset_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ _nccl_broadcast_send(input)
+
+Replacement node for NcclBroadcast.
+"""
+begin
+ begin
+ function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "_NcclBroadcastSend") do
+ desc = tf.NodeDescription("_NcclBroadcastSend")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing)
+ desc = tf.EagerOp("_NcclBroadcastSend")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(_nccl_broadcast_send, [input_], res; name=name, num_devices=num_devices, shared_name=shared_name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ _nccl_broadcast_send_eager(input_; name=name, num_devices=num_devices, shared_name=shared_name)
+ else
+ _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_determinant(input)
+
+
+"""
+begin
+ begin
+ function batch_matrix_determinant_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixDeterminant") do
+ desc = tf.NodeDescription("BatchMatrixDeterminant")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_determinant_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchMatrixDeterminant")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(batch_matrix_determinant, [input_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_determinant(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_determinant_eager(input_; name=name)
+ else
+ batch_matrix_determinant_graph(input_; name=name)
+ end
+ end
+ end
+end
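+
+# Sketch (illustrative values): the determinant is taken over the trailing two
+# dimensions, so a single 2x2 matrix yields a scalar:
+#
+#   A = constant([1.0 2.0; 3.0 4.0])
+#   Ops.batch_matrix_determinant(A)   # ≈ -2.0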
+
+
+"""
+ less_equal(x, y)
+
+
+"""
+begin
+ begin
+ function less_equal_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LessEqual") do
+ desc = tf.NodeDescription("LessEqual")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function less_equal_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("LessEqual")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(less_equal, [x_, y_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less_equal(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ less_equal_eager(x_, y_; name=name)
+ else
+ less_equal_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
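+
+# Sketch (illustrative): element-wise comparison with broadcasting, returning
+# a Bool tensor:
+#
+#   Ops.less_equal(constant([1, 2, 3]), constant(2))   # [true, true, false]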
+
+
+"""
+ apply_gradient_descent(var, alpha, delta; use_locking=false)
+
+
+"""
+begin
+ begin
+ function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyGradientDescent") do
+ desc = tf.NodeDescription("ApplyGradientDescent")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Any}, delta_)
+ begin
+ end
+ end
+ begin
+ (var_, alpha_, delta_) = tf.tf_promote(var_, alpha_, delta_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyGradientDescent")
+ var_ = convert(tf.EagerTensor, var_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(delta_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], res; name=name, use_locking=use_locking)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking)
+ else
+ apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
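+
+# ApplyGradientDescent performs the in-place update `var .-= alpha .* delta`.
+# Graph-mode sketch (the `Variable` and the session run are illustrative):
+#
+#   w = Variable(zeros(3))
+#   step = Ops.apply_gradient_descent(w, constant(0.1), constant(ones(3)))
+#   # running `step` in a session moves `w` to [-0.1, -0.1, -0.1]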
+
+
+"""
+ sparse_segment_sqrt_n(data, indices, segment_ids)
+
+
+"""
+begin
+ begin
+ function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentSqrtN") do
+ desc = tf.NodeDescription("SparseSegmentSqrtN")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentSqrtN")
+ data_ = convert(tf.EagerTensor, data_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name)
+ else
+ sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name)
+ end
+ end
+ end
+end
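+
+# Note the `indices_ - 1` shift in the graph method above: callers pass 1-based
+# Julia indices, which the wrapper converts to TensorFlow's 0-based convention
+# (segment ids receive no such shift). Sketch (illustrative):
+#
+#   data = constant([1.0 2.0; 3.0 4.0])
+#   # rows 1 and 2 summed into segment 0, then divided by sqrt(2)
+#   Ops.sparse_segment_sqrt_n(data, constant([1, 2]), constant(Int32[0, 0]))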
+
+
+"""
+ matrix_logarithm(input)
+
+
+"""
+begin
+ begin
+ function matrix_logarithm_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixLogarithm") do
+ desc = tf.NodeDescription("MatrixLogarithm")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_logarithm_eager(input_; name=nothing)
+ desc = tf.EagerOp("MatrixLogarithm")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(matrix_logarithm, [input_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_logarithm(input_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_logarithm_eager(input_; name=name)
+ else
+ matrix_logarithm_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_mul(ref, indices, updates; use_locking=false)
+
+
+"""
+begin
+ begin
+ function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterMul") do
+ desc = tf.NodeDescription("ScatterMul")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterMul")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], res; name=name, use_locking=use_locking)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_mul_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_mul_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
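+
+# Sketch (illustrative; assumes a graph-mode `Variable`): multiplies `updates`
+# into `ref` at the given positions. Indices are 1-based on the Julia side,
+# per the `indices_ - 1` conversion above:
+#
+#   v = Variable([1.0, 2.0, 3.0])
+#   Ops.scatter_mul(v, constant([1, 3]), constant([10.0, 10.0]))
+#   # after running: v == [10.0, 2.0, 30.0]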
+
+
+"""
+ decode_jpeg(contents; channels=0, ratio=1, fancy_upscaling=true, try_recover_truncated=false, acceptable_fraction=?, dct_method=)
+
+
+"""
+begin
+ begin
+ function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeJpeg") do
+ desc = tf.NodeDescription("DecodeJpeg")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ begin
+ if ratio !== nothing
+ desc["ratio"] = Base.Int(ratio)
+ end
+ end
+ begin
+ if fancy_upscaling !== nothing
+ desc["fancy_upscaling"] = Base.Bool(fancy_upscaling)
+ end
+ end
+ begin
+ if try_recover_truncated !== nothing
+ desc["try_recover_truncated"] = Base.Bool(try_recover_truncated)
+ end
+ end
+ begin
+ if acceptable_fraction !== nothing
+ desc["acceptable_fraction"] = Base.identity(acceptable_fraction)
+ end
+ end
+ begin
+ if dct_method !== nothing
+ desc["dct_method"] = Base.String(dct_method)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+ desc = tf.EagerOp("DecodeJpeg")
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ begin
+ if ratio !== nothing
+ desc["ratio"] = Base.Int(ratio)
+ end
+ end
+ begin
+ if fancy_upscaling !== nothing
+ desc["fancy_upscaling"] = Base.Bool(fancy_upscaling)
+ end
+ end
+ begin
+ if try_recover_truncated !== nothing
+ desc["try_recover_truncated"] = Base.Bool(try_recover_truncated)
+ end
+ end
+ begin
+ if acceptable_fraction !== nothing
+ desc["acceptable_fraction"] = Base.identity(acceptable_fraction)
+ end
+ end
+ begin
+ if dct_method !== nothing
+ desc["dct_method"] = Base.String(dct_method)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(decode_jpeg, [contents_], res; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+ if tf.in_eager_mode()
+ decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
+ else
+ decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
+ end
+ end
+ end
+end
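+
+# Sketch (the file path and the `read_file` wrapper are assumptions; values
+# illustrative). `channels=3` forces RGB output regardless of what the file
+# contains:
+#
+#   bytes = Ops.read_file(constant("image.jpg"))
+#   img = Ops.decode_jpeg(bytes; channels=3)   # HxWx3 UInt8 tensor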
+
+
+"""
+ random_shuffle_queue_v2(; shapes=Int64[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "RandomShuffleQueueV2") do
+ desc = tf.NodeDescription("RandomShuffleQueueV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if min_after_dequeue !== nothing
+ desc["min_after_dequeue"] = Base.Int(min_after_dequeue)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_shuffle_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("RandomShuffleQueueV2")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if min_after_dequeue !== nothing
+ desc["min_after_dequeue"] = Base.Int(min_after_dequeue)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(random_shuffle_queue_v2, [], res; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name)
+ else
+ random_shuffle_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_enqueue_many_v2(handle, components; timeout_ms=-1)
+
+
+"""
+begin
+ begin
+ function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueEnqueueManyV2") do
+ desc = tf.NodeDescription("QueueEnqueueManyV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueEnqueueManyV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ components_ = convert(tf.EagerTensor, components_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], res; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ else
+ queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do
+ desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ mg_ = convert(Tensor{Any}, mg_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ mg_ = convert(tf.EagerTensor, mg_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], res; name=name, use_locking=use_locking)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ interleave_dataset(input_dataset, other_arguments, cycle_length, block_length)
+
+
+"""
+begin
+ begin
+ function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "InterleaveDataset") do
+ desc = tf.NodeDescription("InterleaveDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ begin
+ cycle_length_ = convert(Tensor{Int64}, cycle_length_)
+ begin
+ end
+ end
+ begin
+ block_length_ = convert(Tensor{Int64}, block_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, cycle_length_)
+ end
+ begin
+ tf.add_input(desc, block_length_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("InterleaveDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ cycle_length_ = convert(tf.EagerTensor, cycle_length_)
+ block_length_ = convert(tf.EagerTensor, block_length_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, cycle_length_)
+ end
+ begin
+ tf.add_input(desc, block_length_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], res; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ else
+ interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ stack_pop(handle)
+
+
+"""
+begin
+ begin
+ function stack_pop_graph(handle_; name=nothing, elem_type=nothing)
+ local desc
+ tf.with_op_name(name, "StackPop") do
+ desc = tf.NodeDescription("StackPop")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_pop_eager(handle_; name=nothing, elem_type=nothing)
+ desc = tf.EagerOp("StackPop")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(stack_pop, [handle_], res; name=name, elem_type=elem_type)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop(handle_; name=nothing, elem_type=nothing)
+ if tf.in_eager_mode()
+ stack_pop_eager(handle_; name=name, elem_type=elem_type)
+ else
+ stack_pop_graph(handle_; name=name, elem_type=elem_type)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
+
+
+"""
+begin
+ begin
+ function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do
+ desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ stamp_token_ = convert(Tensor{Int64}, stamp_token_)
+ begin
+ end
+ end
+ begin
+ tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, stamp_token_)
+ end
+ begin
+ tf.add_input(desc, tree_ensemble_serialized_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+ desc = tf.EagerOp("BoostedTreesDeserializeEnsemble")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ stamp_token_ = convert(tf.EagerTensor, stamp_token_)
+ tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, stamp_token_)
+ end
+ begin
+ tf.add_input(desc, tree_ensemble_serialized_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name)
+ else
+ boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ max_pool_v2(input, ksize, strides; data_format=)
+
+
+"""
+begin
+ begin
+ function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolV2") do
+ desc = tf.NodeDescription("MaxPoolV2")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ ksize_ = convert(Tensor{Int32}, ksize_)
+ begin
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Int32}, strides_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, ksize_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPoolV2")
+ input_ = convert(tf.EagerTensor, input_)
+ ksize_ = convert(tf.EagerTensor, ksize_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, ksize_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], res; name=name, padding=padding, data_format=data_format)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool_v2_eager(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+ else
+ max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
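+
+# Sketch (illustrative): 2x2 max pooling with stride 2 over an NHWC batch;
+# `ksize` and `strides` are given per dimension [batch, height, width, channel]:
+#
+#   x = constant(randn(Float32, 1, 28, 28, 1))
+#   Ops.max_pool_v2(x, constant(Int32[1, 2, 2, 1]), constant(Int32[1, 2, 2, 1]);
+#                   padding="VALID")   # -> 1x14x14x1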
+
+
+"""
+ load_and_remap_matrix(ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values; max_rows_in_memory=-1)
+
+
+"""
+begin
+ begin
+ function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing)
+ local desc
+ tf.with_op_name(name, "LoadAndRemapMatrix") do
+ desc = tf.NodeDescription("LoadAndRemapMatrix")
+ begin
+ begin
+ ckpt_path_ = convert(Tensor{String}, ckpt_path_)
+ begin
+ end
+ end
+ begin
+ old_tensor_name_ = convert(Tensor{String}, old_tensor_name_)
+ begin
+ end
+ end
+ begin
+ row_remapping_ = convert(Tensor{Int64}, row_remapping_)
+ begin
+ end
+ end
+ begin
+ col_remapping_ = convert(Tensor{Int64}, col_remapping_)
+ begin
+ end
+ end
+ begin
+ initializing_values_ = convert(Tensor{Float32}, initializing_values_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ckpt_path_)
+ end
+ begin
+ tf.add_input(desc, old_tensor_name_)
+ end
+ begin
+ tf.add_input(desc, row_remapping_)
+ end
+ begin
+ tf.add_input(desc, col_remapping_)
+ end
+ begin
+ tf.add_input(desc, initializing_values_)
+ end
+ end
+ begin
+ begin
+ if num_rows !== nothing
+ desc["num_rows"] = Base.Int(num_rows)
+ end
+ end
+ begin
+ if num_cols !== nothing
+ desc["num_cols"] = Base.Int(num_cols)
+ end
+ end
+ begin
+ if max_rows_in_memory !== nothing
+ desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing)
+ desc = tf.EagerOp("LoadAndRemapMatrix")
+ ckpt_path_ = convert(tf.EagerTensor, ckpt_path_)
+ old_tensor_name_ = convert(tf.EagerTensor, old_tensor_name_)
+ row_remapping_ = convert(tf.EagerTensor, row_remapping_)
+ col_remapping_ = convert(tf.EagerTensor, col_remapping_)
+ initializing_values_ = convert(tf.EagerTensor, initializing_values_)
+ begin
+ begin
+ tf.add_input(desc, ckpt_path_)
+ end
+ begin
+ tf.add_input(desc, old_tensor_name_)
+ end
+ begin
+ tf.add_input(desc, row_remapping_)
+ end
+ begin
+ tf.add_input(desc, col_remapping_)
+ end
+ begin
+ tf.add_input(desc, initializing_values_)
+ end
+ end
+ begin
+ begin
+ if num_rows !== nothing
+ desc["num_rows"] = Base.Int(num_rows)
+ end
+ end
+ begin
+ if num_cols !== nothing
+ desc["num_cols"] = Base.Int(num_cols)
+ end
+ end
+ begin
+ if max_rows_in_memory !== nothing
+ desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], res; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing)
+ if tf.in_eager_mode()
+ load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory)
+ else
+ load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyProximalGradientDescent") do
+ desc = tf.NodeDescription("SparseApplyProximalGradientDescent")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (var_, alpha_, l1_, l2_, grad_) = tf.tf_promote(var_, alpha_, l1_, l2_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyProximalGradientDescent")
+ var_ = convert(tf.EagerTensor, var_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], res; name=name, use_locking=use_locking)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ py_func_stateless(input)
+
+
+"""
+begin
+ begin
+ function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ local desc
+ tf.with_op_name(name, "PyFuncStateless") do
+ desc = tf.NodeDescription("PyFuncStateless")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if token !== nothing
+ desc["token"] = Base.String(token)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ desc = tf.EagerOp("PyFuncStateless")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if token !== nothing
+ desc["token"] = Base.String(token)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(py_func_stateless, [input_], res; name=name, token=token, Tin=Tin, Tout=Tout)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ if tf.in_eager_mode()
+ py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout)
+ else
+ py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout)
+ end
+ end
+ end
+end
+
+
+"""
+ where(input)
+
+
+"""
+begin
+ begin
+ function where_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Where") do
+ desc = tf.NodeDescription("Where")
+ begin
+ begin
+ input_ = convert(Tensor{Bool}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function where_eager(input_; name=nothing)
+ desc = tf.EagerOp("Where")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(where, [input_], res; name=name)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function where(input_; name=nothing)
+ if tf.in_eager_mode()
+ where_eager(input_; name=name)
+ else
+ where_graph(input_; name=name)
+ end
+ end
+ end
+end
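+
+# Sketch (illustrative): returns an N x rank Int64 matrix of coordinates of
+# the `true` entries. No index shift is applied in this wrapper, so the
+# coordinates follow TensorFlow's 0-based convention:
+#
+#   Ops.where(constant([true, false, true]))   # => coordinates [0; 2]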
+
+
+"""
+ mfcc(spectrogram, sample_rate; upper_frequency_limit=?, lower_frequency_limit=?, filterbank_channel_count=40, dct_coefficient_count=13)
+
+
+"""
+begin
+ begin
+ function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing)
+ local desc
+ tf.with_op_name(name, "Mfcc") do
+ desc = tf.NodeDescription("Mfcc")
+ begin
+ begin
+ spectrogram_ = convert(Tensor{Float32}, spectrogram_)
+ begin
+ end
+ end
+ begin
+ sample_rate_ = convert(Tensor{Int32}, sample_rate_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, spectrogram_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ begin
+ if upper_frequency_limit !== nothing
+ desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit)
+ end
+ end
+ begin
+ if lower_frequency_limit !== nothing
+ desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit)
+ end
+ end
+ begin
+ if filterbank_channel_count !== nothing
+ desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count)
+ end
+ end
+ begin
+ if dct_coefficient_count !== nothing
+ desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing)
+ desc = tf.EagerOp("Mfcc")
+ spectrogram_ = convert(tf.EagerTensor, spectrogram_)
+ sample_rate_ = convert(tf.EagerTensor, sample_rate_)
+ begin
+ begin
+ tf.add_input(desc, spectrogram_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ begin
+ if upper_frequency_limit !== nothing
+ desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit)
+ end
+ end
+ begin
+ if lower_frequency_limit !== nothing
+ desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit)
+ end
+ end
+ begin
+ if filterbank_channel_count !== nothing
+ desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count)
+ end
+ end
+ begin
+ if dct_coefficient_count !== nothing
+ desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], res; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing)
+ if tf.in_eager_mode()
+ mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count)
+ else
+ mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count)
+ end
+ end
+ end
+end
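+
+# Sketch (the `spectrogram` input is assumed to come from an upstream
+# AudioSpectrogram op; the sample rate is illustrative):
+#
+#   mfccs = Ops.mfcc(spectrogram, constant(Int32(16000)); dct_coefficient_count=13)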
+
+
+"""
+ check_numerics(tensor)
+
+
+"""
+begin
+ begin
+ function check_numerics_graph(tensor_; name=nothing, message=nothing)
+ local desc
+ tf.with_op_name(name, "CheckNumerics") do
+ desc = tf.NodeDescription("CheckNumerics")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if message !== nothing
+ desc["message"] = Base.String(message)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function check_numerics_eager(tensor_; name=nothing, message=nothing)
+ desc = tf.EagerOp("CheckNumerics")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if message !== nothing
+ desc["message"] = Base.String(message)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function check_numerics(tensor_; name=nothing, message=nothing)
+ if tf.in_eager_mode()
+ check_numerics_eager(tensor_; name=name, message=message)
+ else
+ check_numerics_graph(tensor_; name=name, message=message)
+ end
+ end
+ end
+end
+
+
+"""
+ tpu_compilation_result()
+
+
+"""
+begin
+ begin
+ function tpu_compilation_result_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "TPUCompilationResult") do
+ desc = tf.NodeDescription("TPUCompilationResult")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tpu_compilation_result_eager(; name=nothing)
+ desc = tf.EagerOp("TPUCompilationResult")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tpu_compilation_result, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_compilation_result(; name=nothing)
+ if tf.in_eager_mode()
+ tpu_compilation_result_eager(; name=name)
+ else
+ tpu_compilation_result_graph(; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingStochasticGradientDescentParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_segment_mean_grad(grad, indices, segment_ids, output_dim0)
+
+
+"""
+begin
+ begin
+ function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentMeanGrad") do
+ desc = tf.NodeDescription("SparseSegmentMeanGrad")
+ begin
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ output_dim0_ = convert(Tensor{Int32}, output_dim0_)
+ begin
+ end
+ end
+ begin
+ (grad_,) = tf.tf_promote(grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, output_dim0_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentMeanGrad")
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ output_dim0_ = convert(tf.EagerTensor, output_dim0_)
+ begin
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, output_dim0_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name)
+ else
+ sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    try_rpc(address, method, request; protocol="", fail_fast=true, timeout_in_ms=0)
+
+
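+A hedged sketch (the address, method, and payload below are placeholders, not a live
+service). The op issues an RPC and returns three tensors: the response, a status code,
+and a status message:
+
+    response, status_code, status_message = try_rpc(
+        constant("localhost:8888"), constant("/Service/Method"), constant("payload");
+        timeout_in_ms=1000)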
+"""
+begin
+ begin
+ function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
+ local desc
+ tf.with_op_name(name, "TryRpc") do
+ desc = tf.NodeDescription("TryRpc")
+ begin
+ begin
+ address_ = convert(Tensor{String}, address_)
+ begin
+ end
+ end
+ begin
+ method_ = convert(Tensor{String}, method_)
+ begin
+ end
+ end
+ begin
+ request_ = convert(Tensor{String}, request_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, address_)
+ end
+ begin
+ tf.add_input(desc, method_)
+ end
+ begin
+ tf.add_input(desc, request_)
+ end
+ end
+ begin
+ begin
+ if protocol !== nothing
+ desc["protocol"] = Base.String(protocol)
+ end
+ end
+ begin
+ if fail_fast !== nothing
+ desc["fail_fast"] = Base.Bool(fail_fast)
+ end
+ end
+ begin
+ if timeout_in_ms !== nothing
+ desc["timeout_in_ms"] = Base.Int(timeout_in_ms)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
+ desc = tf.EagerOp("TryRpc")
+ address_ = convert(tf.EagerTensor, address_)
+ method_ = convert(tf.EagerTensor, method_)
+ request_ = convert(tf.EagerTensor, request_)
+ begin
+ begin
+ tf.add_input(desc, address_)
+ end
+ begin
+ tf.add_input(desc, method_)
+ end
+ begin
+ tf.add_input(desc, request_)
+ end
+ end
+ begin
+ begin
+ if protocol !== nothing
+ desc["protocol"] = Base.String(protocol)
+ end
+ end
+ begin
+ if fail_fast !== nothing
+ desc["fail_fast"] = Base.Bool(fail_fast)
+ end
+ end
+ begin
+ if timeout_in_ms !== nothing
+ desc["timeout_in_ms"] = Base.Int(timeout_in_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
+ if tf.in_eager_mode()
+ try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms)
+ else
+ try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_triangular_solve(matrix, rhs; lower=true, adjoint=false)
+
+
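+An illustrative sketch (values chosen for demonstration; `constant` comes from
+TensorFlow.jl). Each trailing 2-D slice of `matrix` is solved as a triangular system
+against the matching slice of `rhs`:
+
+    A = constant(reshape([2.0 0.0; 1.0 3.0], (1, 2, 2)))  # batch of one lower-triangular 2x2
+    b = constant(reshape([4.0, 9.0], (1, 2, 1)))
+    batch_matrix_triangular_solve(A, b; lower=true)        # x such that A * x == b per batch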
+"""
+begin
+ begin
+ function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixTriangularSolve") do
+ desc = tf.NodeDescription("BatchMatrixTriangularSolve")
+ begin
+ begin
+ matrix_ = convert(Tensor{Any}, matrix_)
+ begin
+ end
+ end
+ begin
+ rhs_ = convert(Tensor{Any}, rhs_)
+ begin
+ end
+ end
+ begin
+ (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if lower !== nothing
+ desc["lower"] = Base.Bool(lower)
+ end
+ end
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
+ desc = tf.EagerOp("BatchMatrixTriangularSolve")
+ matrix_ = convert(tf.EagerTensor, matrix_)
+ rhs_ = convert(tf.EagerTensor, rhs_)
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if lower !== nothing
+ desc["lower"] = Base.Bool(lower)
+ end
+ end
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(matrix_)
+ end
+ begin
+ desc["T"] = tf.data_type(rhs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint)
+ else
+ batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint)
+ end
+ end
+ end
+end
+
+
+"""
+ _retval(input)
+
+A graph node which represents a return value of a function.
+"""
+begin
+ begin
+ function _retval_graph(input_; name=nothing, index=nothing)
+ local desc
+ tf.with_op_name(name, "_Retval") do
+ desc = tf.NodeDescription("_Retval")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _retval_eager(input_; name=nothing, index=nothing)
+ desc = tf.EagerOp("_Retval")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _retval(input_; name=nothing, index=nothing)
+ if tf.in_eager_mode()
+ _retval_eager(input_; name=name, index=index)
+ else
+ _retval_graph(input_; name=name, index=index)
+ end
+ end
+ end
+end
+
+
+"""
+ unique_with_counts(x; out_idx=Int32)
+
+
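+A minimal sketch (illustrative input). The op returns three tensors: the unique elements
+of `x`, the index of each input element into those uniques, and the count of each unique:
+
+    y, idx, count = unique_with_counts(constant([1, 1, 2, 4, 4, 4]))
+    # y == [1, 2, 4], count == [3, 1, 3]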
+"""
+begin
+ begin
+ function unique_with_counts_graph(x_; name=nothing, out_idx=nothing)
+ local desc
+ tf.with_op_name(name, "UniqueWithCounts") do
+ desc = tf.NodeDescription("UniqueWithCounts")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unique_with_counts_eager(x_; name=nothing, out_idx=nothing)
+ desc = tf.EagerOp("UniqueWithCounts")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts(x_; name=nothing, out_idx=nothing)
+ if tf.in_eager_mode()
+ unique_with_counts_eager(x_; name=name, out_idx=out_idx)
+ else
+ unique_with_counts_graph(x_; name=name, out_idx=out_idx)
+ end
+ end
+ end
+end
+
+
+"""
+ add(x, y)
+
+
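+A minimal sketch (illustrative values; `constant` comes from TensorFlow.jl):
+
+    x = constant([1, 2, 3])
+    y = constant([10, 20, 30])
+    add(x, y)  # elementwise sum [11, 22, 33]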
+"""
+begin
+ begin
+ function add_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Add") do
+ desc = tf.NodeDescription("Add")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function add_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Add")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(add, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ add_eager(x_, y_; name=name)
+ else
+ add_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_scan_dataset(input_dataset, initial_state, other_arguments; preserve_cardinality=false)
+
+
+"""
+begin
+ begin
+ function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalScanDataset") do
+ desc = tf.NodeDescription("ExperimentalScanDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_]
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, initial_state_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Tstate !== nothing
+ desc["Tstate"] = map(Base.identity, Tstate)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ desc = tf.EagerOp("ExperimentalScanDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ initial_state_ = convert(tf.EagerTensor, initial_state_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, initial_state_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Tstate !== nothing
+ desc["Tstate"] = map(Base.identity, Tstate)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ if tf.in_eager_mode()
+ experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality)
+ else
+ experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality)
+ end
+ end
+ end
+end
+
+
+"""
+ assign_add_variable_op(resource, value)
+
+
+"""
+begin
+ begin
+ function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "AssignAddVariableOp") do
+ desc = tf.NodeDescription("AssignAddVariableOp")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assign_add_variable_op_eager(resource_, value_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("AssignAddVariableOp")
+ resource_ = convert(tf.EagerTensor, resource_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype)
+ else
+ assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ split_v(value, size_splits, split_dim)
+
+
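+An illustrative sketch. `size_splits` gives the length of each output along `split_dim`
+(1-based here; the graph wrapper above subtracts one before dispatching to TensorFlow),
+and `num_split` fixes the number of outputs:
+
+    parts = split_v(constant(collect(1.0:10.0)), constant([3, 7]), constant(1); num_split=2)
+    # parts[1] has length 3, parts[2] has length 7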
+"""
+begin
+ begin
+ function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing)
+ local desc
+ tf.with_op_name(name, "SplitV") do
+ desc = tf.NodeDescription("SplitV")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ size_splits_ = convert(Tensor{Int64}, size_splits_)
+ begin
+ end
+ end
+ begin
+ split_dim_ = convert(Tensor{Int32}, split_dim_)
+ begin
+ split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1)
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ begin
+ (size_splits_,) = tf.tf_promote(size_splits_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, size_splits_)
+ end
+ begin
+ tf.add_input(desc, split_dim_)
+ end
+ end
+ begin
+ begin
+ if num_split !== nothing
+ desc["num_split"] = Base.Int(num_split)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_split
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing)
+ desc = tf.EagerOp("SplitV")
+ value_ = convert(tf.EagerTensor, value_)
+ size_splits_ = convert(tf.EagerTensor, size_splits_)
+ split_dim_ = convert(tf.EagerTensor, split_dim_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, size_splits_)
+ end
+ begin
+ tf.add_input(desc, split_dim_)
+ end
+ end
+ begin
+ begin
+ if num_split !== nothing
+ desc["num_split"] = Base.Int(num_split)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ begin
+ desc["Tlen"] = tf.data_type(size_splits_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing)
+ if tf.in_eager_mode()
+ split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split)
+ else
+ split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split)
+ end
+ end
+ end
+end
+
+
+"""
+ assign(ref, value; validate_shape=true, use_locking=true)
+
+
+"""
+begin
+ begin
+ function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "Assign") do
+ desc = tf.NodeDescription("Assign")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (ref_, value_) = tf.tf_promote(ref_, value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if validate_shape !== nothing
+ desc["validate_shape"] = Base.Bool(validate_shape)
+ end
+ end
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing)
+ desc = tf.EagerOp("Assign")
+ ref_ = convert(tf.EagerTensor, ref_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if validate_shape !== nothing
+ desc["validate_shape"] = Base.Bool(validate_shape)
+ end
+ end
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking)
+ else
+ assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ max_pool_with_argmax(input)
+
+
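+A minimal sketch (illustrative NHWC shapes). The op returns both the pooled values and
+the flattened index of each maximum:
+
+    x = constant(rand(Float32, 1, 4, 4, 1))  # batch of one 4x4 single-channel image
+    output, argmax = max_pool_with_argmax(x; ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")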
+"""
+begin
+ begin
+ function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolWithArgmax") do
+ desc = tf.NodeDescription("MaxPoolWithArgmax")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("MaxPoolWithArgmax")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding)
+ else
+ max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_relu_x(features, max_value, min_features, max_features; out_type=Float32)
+
+
+"""
+begin
+ begin
+ function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedReluX") do
+ desc = tf.NodeDescription("QuantizedReluX")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ max_value_ = convert(Tensor{Float32}, max_value_)
+ begin
+ end
+ end
+ begin
+ min_features_ = convert(Tensor{Float32}, min_features_)
+ begin
+ end
+ end
+ begin
+ max_features_ = convert(Tensor{Float32}, max_features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, max_value_)
+ end
+ begin
+ tf.add_input(desc, min_features_)
+ end
+ begin
+ tf.add_input(desc, max_features_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("QuantizedReluX")
+ features_ = convert(tf.EagerTensor, features_)
+ max_value_ = convert(tf.EagerTensor, max_value_)
+ min_features_ = convert(tf.EagerTensor, min_features_)
+ max_features_ = convert(tf.EagerTensor, max_features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, max_value_)
+ end
+ begin
+ tf.add_input(desc, min_features_)
+ end
+ begin
+ tf.add_input(desc, max_features_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type)
+ else
+ quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+    random_shuffle_queue(; shapes=Int64[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "RandomShuffleQueue") do
+ desc = tf.NodeDescription("RandomShuffleQueue")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if min_after_dequeue !== nothing
+ desc["min_after_dequeue"] = Base.Int(min_after_dequeue)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_shuffle_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("RandomShuffleQueue")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if min_after_dequeue !== nothing
+ desc["min_after_dequeue"] = Base.Int(min_after_dequeue)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name)
+ else
+ random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ fft2d(input)
+
+
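+A minimal sketch (illustrative values). The input is converted to a complex tensor and
+the innermost two dimensions are transformed:
+
+    x = constant(Complex{Float32}.(rand(4, 4)))
+    fft2d(x)  # 2-D discrete Fourier transform of x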
+"""
+begin
+ begin
+ function fft2d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FFT2D") do
+ desc = tf.NodeDescription("FFT2D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fft2d_eager(input_; name=nothing)
+ desc = tf.EagerOp("FFT2D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tcomplex"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fft2d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft2d(input_; name=nothing)
+ if tf.in_eager_mode()
+ fft2d_eager(input_; name=name)
+ else
+ fft2d_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_thread_pool_dataset(input_dataset, thread_pool)
+
+
+"""
+begin
+ begin
+ function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalThreadPoolDataset") do
+ desc = tf.NodeDescription("ExperimentalThreadPoolDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ thread_pool_ = convert(Tensor{Any}, thread_pool_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, thread_pool_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalThreadPoolDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ thread_pool_ = convert(tf.EagerTensor, thread_pool_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, thread_pool_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    ordered_map_unstage(key, indices; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapUnstage") do
+ desc = tf.NodeDescription("OrderedMapUnstage")
+ begin
+ begin
+ key_ = convert(Tensor{Int64}, key_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapUnstage")
+ key_ = convert(tf.EagerTensor, key_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_directed_interleave_dataset(selector_input_dataset, data_input_datasets)
+
+
+"""
+begin
+ begin
+ function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do
+ desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset")
+ begin
+ begin
+ selector_input_dataset_ = convert(Tensor{Any}, selector_input_dataset_)
+ begin
+ end
+ end
+ begin
+ data_input_datasets_ = [convert(Tensor{Any}, x) for x = data_input_datasets_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, selector_input_dataset_)
+ end
+ begin
+ tf.add_input(desc, data_input_datasets_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing)
+ desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset")
+ selector_input_dataset_ = convert(tf.EagerTensor, selector_input_dataset_)
+ data_input_datasets_ = convert(tf.EagerTensor, data_input_datasets_)
+ begin
+ begin
+ tf.add_input(desc, selector_input_dataset_)
+ end
+ begin
+ tf.add_input(desc, data_input_datasets_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing)
+ if tf.in_eager_mode()
+ experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N)
+ else
+ experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ real(input)
+
+
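+A minimal sketch (illustrative values):
+
+    z = constant(Complex{Float32}[1 + 2im, 3 - 4im])
+    real(z)  # [1.0f0, 3.0f0]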
+"""
+begin
+ begin
+ function real_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Real") do
+ desc = tf.NodeDescription("Real")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function real_eager(input_; name=nothing)
+ desc = tf.EagerOp("Real")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(real, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real(input_; name=nothing)
+ if tf.in_eager_mode()
+ real_eager(input_; name=name)
+ else
+ real_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0)
+
+
+"""
+begin
+ begin
+ function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentSqrtNGrad") do
+ desc = tf.NodeDescription("SparseSegmentSqrtNGrad")
+ begin
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ output_dim0_ = convert(Tensor{Int32}, output_dim0_)
+ begin
+ end
+ end
+ begin
+ (grad_,) = tf.tf_promote(grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, output_dim0_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentSqrtNGrad")
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ output_dim0_ = convert(tf.EagerTensor, output_dim0_)
+ begin
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, output_dim0_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name)
+ else
+ sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ rfft2d(input, fft_length)
+
+
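+A minimal sketch (illustrative values). `fft_length` sets the FFT size over the innermost
+two dimensions of the real-valued input; the last output dimension is fft_length[2] ÷ 2 + 1:
+
+    x = constant(rand(Float32, 4, 4))
+    rfft2d(x, constant(Int32[4, 4]))  # complex half-spectrum of shape (4, 3)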
+"""
+begin
+ begin
+ function rfft2d_graph(input_, fft_length_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RFFT2D") do
+ desc = tf.NodeDescription("RFFT2D")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ fft_length_ = convert(Tensor{Int32}, fft_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rfft2d_eager(input_, fft_length_; name=nothing)
+ desc = tf.EagerOp("RFFT2D")
+ input_ = convert(tf.EagerTensor, input_)
+ fft_length_ = convert(tf.EagerTensor, fft_length_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft2d(input_, fft_length_; name=nothing)
+ if tf.in_eager_mode()
+ rfft2d_eager(input_, fft_length_; name=name)
+ else
+ rfft2d_graph(input_, fft_length_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ var_is_initialized_op(resource)
+
+
+"""
+begin
+ begin
+ function var_is_initialized_op_graph(resource_; name=nothing)
+ local desc
+ tf.with_op_name(name, "VarIsInitializedOp") do
+ desc = tf.NodeDescription("VarIsInitializedOp")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function var_is_initialized_op_eager(resource_; name=nothing)
+ desc = tf.EagerOp("VarIsInitializedOp")
+ resource_ = convert(tf.EagerTensor, resource_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_is_initialized_op(resource_; name=nothing)
+ if tf.in_eager_mode()
+ var_is_initialized_op_eager(resource_; name=name)
+ else
+ var_is_initialized_op_graph(resource_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    boosted_trees_quantile_stream_resource_handle_op(; container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do
+ desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_quantile_stream_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("BoostedTreesQuantileStreamResourceHandleOp")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ atan2(y, x)
+
+
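+A minimal sketch (illustrative values). Computes the elementwise arctangent of `y / x`,
+using the signs of both arguments to pick the quadrant:
+
+    atan2(constant([1.0]), constant([1.0]))  # ≈ [π / 4]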
+"""
+begin
+ begin
+ function atan2_graph(y_, x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Atan2") do
+ desc = tf.NodeDescription("Atan2")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (y_, x_) = tf.tf_promote(y_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function atan2_eager(y_, x_; name=nothing)
+ desc = tf.EagerOp("Atan2")
+ y_ = convert(tf.EagerTensor, y_)
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(atan2, [y_, x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan2(y_, x_; name=nothing)
+ if tf.in_eager_mode()
+ atan2_eager(y_, x_; name=name)
+ else
+ atan2_graph(y_, x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ random_poisson(shape, rate; seed=0, seed2=0)
+
+
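+A minimal sketch (illustrative values; the seed is arbitrary). Draws samples of shape
+`shape` for each rate in `rate`; fixing `seed` makes the draw reproducible:
+
+    random_poisson(constant(Int32[3]), constant(4.0); seed=42)  # 3 draws with rate 4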
+"""
+begin
+ begin
+ function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "RandomPoisson") do
+ desc = tf.NodeDescription("RandomPoisson")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ rate_ = convert(Tensor{Any}, rate_)
+ begin
+ end
+ end
+ begin
+ (rate_,) = tf.tf_promote(rate_)
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, rate_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing)
+ desc = tf.EagerOp("RandomPoisson")
+ shape_ = convert(tf.EagerTensor, shape_)
+ rate_ = convert(tf.EagerTensor, rate_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, rate_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["S"] = tf.data_type(shape_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(rate_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype)
+ else
+ random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ reverse_sequence(input, seq_lengths; batch_dim=0)
+
+
+"""
+begin
+ begin
+ function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing)
+ local desc
+ tf.with_op_name(name, "ReverseSequence") do
+ desc = tf.NodeDescription("ReverseSequence")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ seq_lengths_ = convert(Tensor{Int64}, seq_lengths_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (seq_lengths_,) = tf.tf_promote(seq_lengths_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, seq_lengths_)
+ end
+ end
+ begin
+ begin
+ if seq_dim !== nothing
+ desc["seq_dim"] = Base.Int(seq_dim)
+ end
+ end
+ begin
+ if batch_dim !== nothing
+ desc["batch_dim"] = Base.Int(batch_dim)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing)
+ desc = tf.EagerOp("ReverseSequence")
+ input_ = convert(tf.EagerTensor, input_)
+ seq_lengths_ = convert(tf.EagerTensor, seq_lengths_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, seq_lengths_)
+ end
+ end
+ begin
+ begin
+ if seq_dim !== nothing
+ desc["seq_dim"] = Base.Int(seq_dim)
+ end
+ end
+ begin
+ if batch_dim !== nothing
+ desc["batch_dim"] = Base.Int(batch_dim)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tlen"] = tf.data_type(seq_lengths_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing)
+ if tf.in_eager_mode()
+ reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim)
+ else
+ reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim)
+ end
+ end
+ end
+end
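+
+# Usage sketch (illustrative): this raw wrapper forwards `seq_dim` and
+# `batch_dim` to the kernel unadjusted, so both follow TensorFlow's 0-based
+# convention rather than Julia's 1-based indexing.
+#
+#     x = constant([1 2 3; 4 5 6])
+#     Ops.reverse_sequence(x, constant(Int64[2, 3]); seq_dim=1, batch_dim=0)
+#     # row 1 reverses its first two entries, row 2 all three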
+
+
+"""
+ outfeed_enqueue(input)
+
+An op which emits a single Tensor value from an XLA computation.
+"""
+begin
+ begin
+ function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "OutfeedEnqueue") do
+ desc = tf.NodeDescription("OutfeedEnqueue")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("OutfeedEnqueue")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue(input_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ outfeed_enqueue_eager(input_; name=name, dtype=dtype)
+ else
+ outfeed_enqueue_graph(input_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ sub(x, y)
+
+Returns `x - y` element-wise, with broadcasting.
+"""
+begin
+ begin
+ function sub_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Sub") do
+ desc = tf.NodeDescription("Sub")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sub_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Sub")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sub, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sub(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ sub_eager(x_, y_; name=name)
+ else
+ sub_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
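+
+# Usage sketch (illustrative): element-wise subtraction with broadcasting,
+# assuming eager execution is enabled.
+#
+#     Ops.sub(constant([3.0, 4.0]), constant([1.0, 2.0]))  # => [2.0, 2.0]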
+
+
+"""
+ string_split(input, delimiter; skip_empty=true)
+
+Splits elements of `input` on `delimiter`, returning a sparse (indices, values, shape) triple.
+"""
+begin
+ begin
+ function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing)
+ local desc
+ tf.with_op_name(name, "StringSplit") do
+ desc = tf.NodeDescription("StringSplit")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ begin
+ delimiter_ = convert(Tensor{String}, delimiter_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, delimiter_)
+ end
+ end
+ begin
+ begin
+ if skip_empty !== nothing
+ desc["skip_empty"] = Base.Bool(skip_empty)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing)
+ desc = tf.EagerOp("StringSplit")
+ input_ = convert(tf.EagerTensor, input_)
+ delimiter_ = convert(tf.EagerTensor, delimiter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, delimiter_)
+ end
+ end
+ begin
+ begin
+ if skip_empty !== nothing
+ desc["skip_empty"] = Base.Bool(skip_empty)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split(input_, delimiter_; name=nothing, skip_empty=nothing)
+ if tf.in_eager_mode()
+ string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty)
+ else
+ string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty)
+ end
+ end
+ end
+end
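+
+# Usage sketch (illustrative): the op returns the sparse triple
+# (indices, values, shape) rather than a single dense tensor.
+#
+#     indices, values, shape =
+#         Ops.string_split(constant(["a b", "c"]), constant(" "))
+#     # values => ["a", "b", "c"]; shape => [2, 2]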
+
+
+"""
+ cumprod(x, axis; exclusive=false, reverse=false)
+
+Computes the cumulative product of `x` along `axis`.
+"""
+begin
+ begin
+ function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing)
+ local desc
+ tf.with_op_name(name, "Cumprod") do
+ desc = tf.NodeDescription("Cumprod")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Int32}, axis_)
+ begin
+ axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if exclusive !== nothing
+ desc["exclusive"] = Base.Bool(exclusive)
+ end
+ end
+ begin
+ if reverse !== nothing
+ desc["reverse"] = Base.Bool(reverse)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing)
+ desc = tf.EagerOp("Cumprod")
+ x_ = convert(tf.EagerTensor, x_)
+            # Shift `axis` by -1 so the eager path matches the 1-based axis
+            # convention of the graph wrapper above.
+            axis_ = axis_ - 1
+            axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if exclusive !== nothing
+ desc["exclusive"] = Base.Bool(exclusive)
+ end
+ end
+ begin
+ if reverse !== nothing
+ desc["reverse"] = Base.Bool(reverse)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing)
+ if tf.in_eager_mode()
+ cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse)
+ else
+ cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse)
+ end
+ end
+ end
+end
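+
+# Usage sketch (illustrative): with the 1-based `axis` convention used by
+# these wrappers, axis=1 is the first dimension.
+#
+#     Ops.cumprod(constant([1.0, 2.0, 3.0]), 1)  # => [1.0, 2.0, 6.0]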
+
+
+"""
+ quantized_resize_bilinear(images, size, min, max; align_corners=false)
+
+Resizes quantized `images` to `size` using quantized bilinear interpolation.
+"""
+begin
+ begin
+ function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedResizeBilinear") do
+ desc = tf.NodeDescription("QuantizedResizeBilinear")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ min_ = convert(Tensor{Float32}, min_)
+ begin
+ end
+ end
+ begin
+ max_ = convert(Tensor{Float32}, max_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("QuantizedResizeBilinear")
+ images_ = convert(tf.EagerTensor, images_)
+ size_ = convert(tf.EagerTensor, size_)
+ min_ = convert(tf.EagerTensor, min_)
+ max_ = convert(tf.EagerTensor, max_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners)
+ else
+ quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
+
+
+"""
+ parse_single_example(serialized, dense_defaults)
+
+Transforms a serialized `Example` proto into typed sparse and dense tensors.
+"""
+begin
+ begin
+ function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ParseSingleExample") do
+ desc = tf.NodeDescription("ParseSingleExample")
+ begin
+ begin
+ serialized_ = convert(Tensor{String}, serialized_)
+ begin
+ end
+ end
+ begin
+ dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if num_sparse !== nothing
+ desc["num_sparse"] = Base.Int(num_sparse)
+ end
+ end
+ begin
+ if sparse_keys !== nothing
+ desc["sparse_keys"] = map(Base.identity, sparse_keys)
+ end
+ end
+ begin
+ if dense_keys !== nothing
+ desc["dense_keys"] = map(Base.identity, dense_keys)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if Tdense !== nothing
+ desc["Tdense"] = map(Base.identity, Tdense)
+ end
+ end
+ begin
+ if dense_shapes !== nothing
+ desc["dense_shapes"] = map(Base.identity, dense_shapes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+ desc = tf.EagerOp("ParseSingleExample")
+ serialized_ = convert(tf.EagerTensor, serialized_)
+            dense_defaults_ = [convert(tf.EagerTensor, x) for x = dense_defaults_]
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if num_sparse !== nothing
+ desc["num_sparse"] = Base.Int(num_sparse)
+ end
+ end
+ begin
+ if sparse_keys !== nothing
+ desc["sparse_keys"] = map(Base.identity, sparse_keys)
+ end
+ end
+ begin
+ if dense_keys !== nothing
+ desc["dense_keys"] = map(Base.identity, dense_keys)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if Tdense !== nothing
+ desc["Tdense"] = map(Base.identity, Tdense)
+ end
+ end
+ begin
+ if dense_shapes !== nothing
+ desc["dense_shapes"] = map(Base.identity, dense_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+ if tf.in_eager_mode()
+ parse_single_example_eager(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes)
+ else
+ parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ is_variable_initialized(ref)
+
+Returns whether the variable `ref` has been initialized.
+"""
+begin
+ begin
+ function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "IsVariableInitialized") do
+ desc = tf.NodeDescription("IsVariableInitialized")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ (ref_,) = tf.tf_promote(ref_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("IsVariableInitialized")
+ ref_ = convert(tf.EagerTensor, ref_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(ref_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_variable_initialized(ref_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ is_variable_initialized_eager(ref_; name=name, dtype=dtype)
+ else
+ is_variable_initialized_graph(ref_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_scatter_sub(resource, indices, updates)
+
+Subtracts sparse `updates` from the variable referenced by `resource` at the given `indices`.
+"""
+begin
+ begin
+ function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterSub") do
+ desc = tf.NodeDescription("ResourceScatterSub")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterSub")
+ resource_ = convert(tf.EagerTensor, resource_)
+            # Shift `indices` by -1 so the eager path matches the 1-based
+            # index convention of the graph wrapper above.
+            indices_ = indices_ .- 1
+            indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
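+
+# Usage sketch (illustrative): subtracts rows of `updates` from a resource
+# variable at the given indices; the wrappers shift `indices` by -1, so
+# they are 1-based on the Julia side.
+#
+#     Ops.resource_scatter_sub(var_handle, [1, 3], updates)
+#     # `var_handle` and `updates` are placeholders for a resource-variable
+#     # handle and a matching update tensor, respectively.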
+
+
+"""
+    experimental_stats_aggregator_handle(; container="", shared_name="")
+
+Creates a statistics manager resource.
+"""
+begin
+ begin
+ function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do
+ desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_stats_aggregator_handle_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("ExperimentalStatsAggregatorHandle")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+    cudnn_rnnv2(input, input_h, input_c, params; rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0.0, seed=0, seed2=0, is_training=true)
+
+An RNN backed by cuDNN; like CudnnRNN, but also exposes the cuDNN reserve space as an output.
+"""
+begin
+ begin
+ function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNNV2") do
+ desc = tf.NodeDescription("CudnnRNNV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_h_ = convert(Tensor{Any}, input_h_)
+ begin
+ end
+ end
+ begin
+ input_c_ = convert(Tensor{Any}, input_c_)
+ begin
+ end
+ end
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+ desc = tf.EagerOp("CudnnRNNV2")
+ input_ = convert(tf.EagerTensor, input_)
+ input_h_ = convert(tf.EagerTensor, input_h_)
+ input_c_ = convert(tf.EagerTensor, input_c_)
+ params_ = convert(tf.EagerTensor, params_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_h_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_c_)
+ end
+ begin
+ desc["T"] = tf.data_type(params_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training)
+ else
+ cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training)
+ end
+ end
+ end
+end
+
+
+"""
+ assign_add(ref, value; use_locking=false)
+
+Updates `ref` by adding `value` to it and returns the new value.
+"""
+begin
+ begin
+ function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "AssignAdd") do
+ desc = tf.NodeDescription("AssignAdd")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (ref_, value_) = tf.tf_promote(ref_, value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("AssignAdd")
+ ref_ = convert(tf.EagerTensor, ref_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ assign_add_eager(ref_, value_; name=name, use_locking=use_locking)
+ else
+ assign_add_graph(ref_, value_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
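+
+# Usage sketch (illustrative, graph mode): mutates `ref` in place and
+# returns the updated value. Assumes the package's Session/Variable API.
+#
+#     sess = Session(Graph())
+#     v = Variable(1.0)
+#     run(sess, global_variables_initializer())
+#     run(sess, Ops.assign_add(v, 2.0))  # => 3.0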
+
+
+"""
+ tensor_dataset(components)
+
+Creates a dataset that emits `components` as a tuple of tensors once.
+"""
+begin
+ begin
+ function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "TensorDataset") do
+ desc = tf.NodeDescription("TensorDataset")
+ begin
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("TensorDataset")
+            components_ = [convert(tf.EagerTensor, x) for x = components_]
+ begin
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes)
+ else
+ tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ bucketize(input)
+
+Bucketizes `input` according to the sorted list `boundaries`.
+"""
+begin
+ begin
+ function bucketize_graph(input_; name=nothing, boundaries=nothing)
+ local desc
+ tf.with_op_name(name, "Bucketize") do
+ desc = tf.NodeDescription("Bucketize")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if boundaries !== nothing
+ desc["boundaries"] = map(Base.identity, boundaries)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bucketize_eager(input_; name=nothing, boundaries=nothing)
+ desc = tf.EagerOp("Bucketize")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if boundaries !== nothing
+ desc["boundaries"] = map(Base.identity, boundaries)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bucketize(input_; name=nothing, boundaries=nothing)
+ if tf.in_eager_mode()
+ bucketize_eager(input_; name=name, boundaries=boundaries)
+ else
+ bucketize_graph(input_; name=name, boundaries=boundaries)
+ end
+ end
+ end
+end
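+
+# Usage sketch (illustrative): `boundaries` is a sorted list; the result is
+# each element's bucket index.
+#
+#     Ops.bucketize(constant([1.0, 5.0, 9.0]); boundaries=[2.0, 6.0])
+#     # => [0, 1, 2]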
+
+
+"""
+ sparse_reduce_max(input_indices, input_values, input_shape, reduction_axes; keep_dims=false)
+
+Computes the maximum of elements across dimensions of a SparseTensor.
+"""
+begin
+ begin
+ function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "SparseReduceMax") do
+ desc = tf.NodeDescription("SparseReduceMax")
+ begin
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_values_ = convert(Tensor{Any}, input_values_)
+ begin
+ end
+ end
+ begin
+ input_shape_ = convert(Tensor{Int64}, input_shape_)
+ begin
+ end
+ end
+ begin
+ reduction_axes_ = convert(Tensor{Int32}, reduction_axes_)
+ begin
+ end
+ end
+ begin
+ (input_values_,) = tf.tf_promote(input_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("SparseReduceMax")
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_values_ = convert(tf.EagerTensor, input_values_)
+ input_shape_ = convert(tf.EagerTensor, input_shape_)
+ reduction_axes_ = convert(tf.EagerTensor, reduction_axes_)
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ else
+ sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend)
+
+Creates a TensorArray for storing gradients of values in `handle`, with `shape_to_prepend` added to each element's shape.
+"""
+begin
+ begin
+ function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGradWithShape") do
+ desc = tf.NodeDescription("TensorArrayGradWithShape")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ begin
+ tf.add_input(desc, shape_to_prepend_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing)
+ desc = tf.EagerOp("TensorArrayGradWithShape")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ shape_to_prepend_ = convert(tf.EagerTensor, shape_to_prepend_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ begin
+ tf.add_input(desc, shape_to_prepend_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing)
+ if tf.in_eager_mode()
+ tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source)
+ else
+ tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source)
+ end
+ end
+ end
+end
+
+
+"""
+    retrieve_tpu_embedding_mdl_adagrad_light_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingMDLAdagradLightParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_close_v3(handle)
+
+Deletes the TensorArray referenced by `handle` from its resource container.
+"""
+begin
+ begin
+ function tensor_array_close_v3_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayCloseV3") do
+ desc = tf.NodeDescription("TensorArrayCloseV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_close_v3_eager(handle_; name=nothing)
+ desc = tf.EagerOp("TensorArrayCloseV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v3(handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_close_v3_eager(handle_; name=name)
+ else
+ tensor_array_close_v3_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold, score_threshold)
+
+Greedily selects boxes in descending score order, pruning boxes whose overlap with a selected box exceeds `overlap_threshold`.
+"""
+begin
+ begin
+ function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing)
+ local desc
+ tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do
+ desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps")
+ begin
+ begin
+ overlaps_ = convert(Tensor{Float32}, overlaps_)
+ begin
+ end
+ end
+ begin
+ scores_ = convert(Tensor{Float32}, scores_)
+ begin
+ end
+ end
+ begin
+ max_output_size_ = convert(Tensor{Int32}, max_output_size_)
+ begin
+ end
+ end
+ begin
+ overlap_threshold_ = convert(Tensor{Float32}, overlap_threshold_)
+ begin
+ end
+ end
+ begin
+ score_threshold_ = convert(Tensor{Float32}, score_threshold_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, overlaps_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, overlap_threshold_)
+ end
+ begin
+ tf.add_input(desc, score_threshold_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing)
+ desc = tf.EagerOp("NonMaxSuppressionWithOverlaps")
+ overlaps_ = convert(tf.EagerTensor, overlaps_)
+ scores_ = convert(tf.EagerTensor, scores_)
+ max_output_size_ = convert(tf.EagerTensor, max_output_size_)
+ overlap_threshold_ = convert(tf.EagerTensor, overlap_threshold_)
+ score_threshold_ = convert(tf.EagerTensor, score_threshold_)
+ begin
+ begin
+ tf.add_input(desc, overlaps_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ begin
+ tf.add_input(desc, overlap_threshold_)
+ end
+ begin
+ tf.add_input(desc, score_threshold_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing)
+ if tf.in_eager_mode()
+ non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name)
+ else
+ non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ pack(values; axis=0)
+
+Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor along `axis`.
+"""
+begin
+ begin
+ function pack_graph(values_; name=nothing, N=nothing, axis=nothing)
+ local desc
+ tf.with_op_name(name, "Pack") do
+ desc = tf.NodeDescription("Pack")
+ begin
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if axis !== nothing
+ axis = Base.Int(axis) - 1
+ end
+ end
+ begin
+ if axis !== nothing
+ desc["axis"] = Base.Int(axis)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function pack_eager(values_; name=nothing, N=nothing, axis=nothing)
+ desc = tf.EagerOp("Pack")
+            values_ = [convert(tf.EagerTensor, x) for x = values_]
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if axis !== nothing
+ axis = Base.Int(axis) - 1
+ end
+ end
+ begin
+ if axis !== nothing
+ desc["axis"] = Base.Int(axis)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing)
+ if tf.in_eager_mode()
+ pack_eager(values_; name=name, N=N, axis=axis)
+ else
+ pack_graph(values_; name=name, N=N, axis=axis)
+ end
+ end
+ end
+end
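+
+# Usage sketch (illustrative): packs rank-R tensors into one rank-(R+1)
+# tensor; `axis` is adjusted by -1 in both wrappers, so it is 1-based here.
+#
+#     Ops.pack([constant([1, 2]), constant([3, 4])]; axis=1)
+#     # => a 2x2 tensor with the inputs stacked along the first dimension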
+
+
+"""
+ tensor_array_grad_v2(handle, flow_in)
+
+Creates a TensorArray for storing gradients of values in `handle` (deprecated; use TensorArrayGradV3).
+"""
+begin
+ begin
+ function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGradV2") do
+ desc = tf.NodeDescription("TensorArrayGradV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing)
+ desc = tf.EagerOp("TensorArrayGradV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing)
+ if tf.in_eager_mode()
+ tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source)
+ else
+ tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source)
+ end
+ end
+ end
+end
+
+
+"""
+ assign_sub_variable_op(resource, value)
+
+Subtracts `value` from the current value of the resource variable.
+"""
+begin
+ begin
+ function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "AssignSubVariableOp") do
+ desc = tf.NodeDescription("AssignSubVariableOp")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("AssignSubVariableOp")
+ resource_ = convert(tf.EagerTensor, resource_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype)
+ else
+ assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_fft2d(input)
+
+Computes a 2D discrete Fourier transform over the inner-most two dimensions of `input` (deprecated alias of `FFT2D`).
+"""
+begin
+ begin
+ function batch_fft2d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchFFT2D") do
+ desc = tf.NodeDescription("BatchFFT2D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_fft2d_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchFFT2D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_fft2d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft2d(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_fft2d_eager(input_; name=name)
+ else
+ batch_fft2d_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ close_summary_writer(writer)
+
+Flushes and closes the summary `writer`.
+"""
+begin
+ begin
+ function close_summary_writer_graph(writer_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CloseSummaryWriter") do
+ desc = tf.NodeDescription("CloseSummaryWriter")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function close_summary_writer_eager(writer_; name=nothing)
+ desc = tf.EagerOp("CloseSummaryWriter")
+ writer_ = convert(tf.EagerTensor, writer_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(close_summary_writer, [writer_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function close_summary_writer(writer_; name=nothing)
+ if tf.in_eager_mode()
+ close_summary_writer_eager(writer_; name=name)
+ else
+ close_summary_writer_graph(writer_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ rank(input)
+
+Returns the rank (number of dimensions) of `input` as a scalar Int32.
+"""
+begin
+ begin
+ function rank_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Rank") do
+ desc = tf.NodeDescription("Rank")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rank_eager(input_; name=nothing)
+ desc = tf.EagerOp("Rank")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rank, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rank(input_; name=nothing)
+ if tf.in_eager_mode()
+ rank_eager(input_; name=name)
+ else
+ rank_graph(input_; name=name)
+ end
+ end
+ end
+end
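+
+# Usage sketch (illustrative): returns the number of dimensions as a scalar
+# Int32 tensor, not the number of elements.
+#
+#     Ops.rank(constant(ones(2, 3)))  # => 2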
+
+
+"""
+ fft3d(input)
+
+Computes a 3D fast Fourier transform over the inner-most three dimensions of `input`.
+"""
+begin
+ begin
+ function fft3d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FFT3D") do
+ desc = tf.NodeDescription("FFT3D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fft3d_eager(input_; name=nothing)
+ desc = tf.EagerOp("FFT3D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tcomplex"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fft3d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft3d(input_; name=nothing)
+ if tf.in_eager_mode()
+ fft3d_eager(input_; name=name)
+ else
+ fft3d_graph(input_; name=name)
+ end
+ end
+ end
+end
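+
+# Usage sketch (illustrative): transforms the inner-most three dimensions;
+# the wrapper converts the input to Complex{Float32} as needed.
+#
+#     x = constant(ComplexF32.(randn(4, 4, 4)))
+#     Ops.fft3d(x)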
+
+
+"""
+ apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power; use_locking=false)
+
+Updates `var` according to the FTRL-proximal optimization scheme.
+"""
+begin
+ begin
+ function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyFtrl") do
+ desc = tf.NodeDescription("ApplyFtrl")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyFtrl")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(linear_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ else
+ apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    abort(; error_msg="", exit_without_error=false)
+
+Raises an exception to abort the process when executed.
+"""
+begin
+ begin
+ function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing)
+ local desc
+ tf.with_op_name(name, "Abort") do
+ desc = tf.NodeDescription("Abort")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if error_msg !== nothing
+ desc["error_msg"] = Base.String(error_msg)
+ end
+ end
+ begin
+ if exit_without_error !== nothing
+ desc["exit_without_error"] = Base.Bool(exit_without_error)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function abort_eager(; name=nothing, error_msg=nothing, exit_without_error=nothing)
+ desc = tf.EagerOp("Abort")
+ begin
+ end
+ begin
+ begin
+ if error_msg !== nothing
+ desc["error_msg"] = Base.String(error_msg)
+ end
+ end
+ begin
+ if exit_without_error !== nothing
+ desc["exit_without_error"] = Base.Bool(exit_without_error)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, exit_without_error=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing)
+ if tf.in_eager_mode()
+ abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error)
+ else
+ abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error)
+ end
+ end
+ end
+end
+
+
+"""
+ audio_spectrogram(input; magnitude_squared=false)
+
+Produces a spectrogram from `input` audio of shape `[length, channels]`.
+"""
+begin
+ begin
+ function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing)
+ local desc
+ tf.with_op_name(name, "AudioSpectrogram") do
+ desc = tf.NodeDescription("AudioSpectrogram")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if window_size !== nothing
+ desc["window_size"] = Base.Int(window_size)
+ end
+ end
+ begin
+ if stride !== nothing
+ desc["stride"] = Base.Int(stride)
+ end
+ end
+ begin
+ if magnitude_squared !== nothing
+ desc["magnitude_squared"] = Base.Bool(magnitude_squared)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing)
+ desc = tf.EagerOp("AudioSpectrogram")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if window_size !== nothing
+ desc["window_size"] = Base.Int(window_size)
+ end
+ end
+ begin
+ if stride !== nothing
+ desc["stride"] = Base.Int(stride)
+ end
+ end
+ begin
+ if magnitude_squared !== nothing
+ desc["magnitude_squared"] = Base.Bool(magnitude_squared)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(audio_spectrogram, [input_], name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing)
+ if tf.in_eager_mode()
+ audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared)
+ else
+ audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared)
+ end
+ end
+ end
+end
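+
+# Usage sketch (illustrative): expects Float32 audio shaped
+# [samples, channels]; `window_size` and `stride` are in samples.
+#
+#     audio = constant(Float32.(randn(1024, 1)))
+#     Ops.audio_spectrogram(audio; window_size=256, stride=128)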
+
+
+"""
+ variable_shape(input; out_type=Int32)
+
+Returns the shape of the resource variable pointed to by `input`.
+"""
+begin
+ begin
+ function variable_shape_graph(input_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "VariableShape") do
+ desc = tf.NodeDescription("VariableShape")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function variable_shape_eager(input_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("VariableShape")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_shape(input_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ variable_shape_eager(input_; name=name, out_type=out_type)
+ else
+ variable_shape_graph(input_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+    fifo_queue_v2(; shapes=Int64[], capacity=-1, container="", shared_name="")
+
+Creates a queue that produces elements in first-in first-out order.
+"""
+begin
+ begin
+ function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "FIFOQueueV2") do
+ desc = tf.NodeDescription("FIFOQueueV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("FIFOQueueV2")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
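+
+# Illustrative usage sketch (a hypothetical helper, an assumption about
+# intended use): builds a handle to a FIFO queue holding scalar Float32
+# elements.
+function _fifo_queue_v2_example()
+    fifo_queue_v2(component_types=[Float32], capacity=32, shared_name="example_queue")
+end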
+
+
+"""
+    variable(; container="", shared_name="")
+
+Holds state in the form of a tensor that persists across steps.
+"""
+begin
+ begin
+ function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "Variable") do
+ desc = tf.NodeDescription("Variable")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function variable_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("Variable")
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name)
+ else
+ variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
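+
+# Illustrative graph-mode sketch (a hypothetical helper; in practice the
+# higher-level `TensorFlow.Variable` API is used rather than this raw op,
+# and the shape attribute is assumed to accept a `tf.TensorShape`):
+function _variable_example()
+    variable(shape=tf.TensorShape([3, 4]), dtype=Float32, name="raw_variable")
+end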
+
+
+"""
+ tensor_forest_create_tree_variable(tree_handle, tree_config)
+
+Creates a tensor-forest tree resource and initializes it from a serialized tree config.
+"""
+begin
+ begin
+ function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestCreateTreeVariable") do
+ desc = tf.NodeDescription("TensorForestCreateTreeVariable")
+ begin
+ begin
+ tree_handle_ = convert(Tensor{Any}, tree_handle_)
+ begin
+ end
+ end
+ begin
+ tree_config_ = convert(Tensor{String}, tree_config_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ begin
+ tf.add_input(desc, tree_config_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing)
+ desc = tf.EagerOp("TensorForestCreateTreeVariable")
+ tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+ tree_config_ = convert(tf.EagerTensor, tree_config_)
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ begin
+ tf.add_input(desc, tree_config_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, tree_config_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name)
+ else
+ tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ max_pool_grad_with_argmax(input, grad, argmax)
+
+Computes gradients of the max-pooling function using the `argmax` indices.
+"""
+begin
+ begin
+ function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolGradWithArgmax") do
+ desc = tf.NodeDescription("MaxPoolGradWithArgmax")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ argmax_ = convert(Tensor{Any}, argmax_)
+ begin
+ end
+ end
+ begin
+ (argmax_,) = tf.tf_promote(argmax_)
+ end
+ begin
+ (input_, grad_) = tf.tf_promote(input_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, argmax_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("MaxPoolGradWithArgmax")
+ input_ = convert(tf.EagerTensor, input_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ argmax_ = convert(tf.EagerTensor, argmax_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, argmax_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Targmax"] = tf.data_type(argmax_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding)
+ else
+ max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ ref_switch(data, pred)
+
+Forwards the ref tensor `data` to the output port determined by `pred`.
+"""
+begin
+ begin
+ function ref_switch_graph(data_, pred_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RefSwitch") do
+ desc = tf.NodeDescription("RefSwitch")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ pred_ = convert(Tensor{Bool}, pred_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, pred_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ref_switch_eager(data_, pred_; name=nothing)
+ desc = tf.EagerOp("RefSwitch")
+ data_ = convert(tf.EagerTensor, data_)
+ pred_ = convert(tf.EagerTensor, pred_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, pred_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_switch, [data_, pred_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_switch(data_, pred_; name=nothing)
+ if tf.in_eager_mode()
+ ref_switch_eager(data_, pred_; name=name)
+ else
+ ref_switch_graph(data_, pred_; name=name)
+ end
+ end
+ end
+end
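+
+# Illustrative sketch (a hypothetical helper; semantics assumed to follow the
+# upstream RefSwitch op): `data` is forwarded to the second output when
+# `pred` is true and to the first otherwise.
+function _ref_switch_example(data, pred)
+    output_false, output_true = ref_switch(data, pred)
+    (output_false, output_true)
+end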
+
+
+"""
+ sdca_fprint(input)
+
+Computes fingerprints of the input strings.
+"""
+begin
+ begin
+ function sdca_fprint_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SdcaFprint") do
+ desc = tf.NodeDescription("SdcaFprint")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sdca_fprint_eager(input_; name=nothing)
+ desc = tf.EagerOp("SdcaFprint")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sdca_fprint, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_fprint(input_; name=nothing)
+ if tf.in_eager_mode()
+ sdca_fprint_eager(input_; name=name)
+ else
+ sdca_fprint_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    leaky_relu(features; alpha=0.2)
+
+Computes the leaky rectified linear activation, `max(alpha * features, features)`.
+"""
+begin
+ begin
+ function leaky_relu_graph(features_; name=nothing, alpha=nothing)
+ local desc
+ tf.with_op_name(name, "LeakyRelu") do
+ desc = tf.NodeDescription("LeakyRelu")
+ begin
+ begin
+ features_ = convert(Tensor{Float32}, features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function leaky_relu_eager(features_; name=nothing, alpha=nothing)
+ desc = tf.EagerOp("LeakyRelu")
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu(features_; name=nothing, alpha=nothing)
+ if tf.in_eager_mode()
+ leaky_relu_eager(features_; name=name, alpha=alpha)
+ else
+ leaky_relu_graph(features_; name=name, alpha=alpha)
+ end
+ end
+ end
+end
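+
+# Illustrative eager-mode sketch (a hypothetical helper; assumes
+# `enable_eager_execution()` has been called): computes
+# `max(alpha .* x, x)` elementwise.
+function _leaky_relu_example()
+    x = tf.constant([-1.0, 0.5, 2.0])
+    leaky_relu(x; alpha=0.2)
+end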
+
+
+"""
+ identity_n(input)
+
+Returns a list of tensors with the same shapes and contents as the inputs.
+"""
+begin
+ begin
+ function identity_n_graph(input_; name=nothing, T=nothing)
+ local desc
+ tf.with_op_name(name, "IdentityN") do
+ desc = tf.NodeDescription("IdentityN")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function identity_n_eager(input_; name=nothing, T=nothing)
+ desc = tf.EagerOp("IdentityN")
+            input_ = [convert(tf.EagerTensor, x) for x = input_]  # list input: convert elementwise, as in the graph path
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_n(input_; name=nothing, T=nothing)
+ if tf.in_eager_mode()
+ identity_n_eager(input_; name=name, T=T)
+ else
+ identity_n_graph(input_; name=name, T=T)
+ end
+ end
+ end
+end
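+
+# Illustrative sketch (a hypothetical helper, an assumption about intended
+# use): `identity_n` takes and returns a list of tensors, which is why its
+# eager path must convert each element separately.
+function _identity_n_example()
+    xs = [tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0])]
+    identity_n(xs)
+end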
+
+
+"""
+    cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0)
+
+Backprop step of CudnnRNN that also consumes the host-memory `host_reserved` buffer produced by CudnnRNNV2.
+"""
+begin
+ begin
+ function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNNBackpropV2") do
+ desc = tf.NodeDescription("CudnnRNNBackpropV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_h_ = convert(Tensor{Any}, input_h_)
+ begin
+ end
+ end
+ begin
+ input_c_ = convert(Tensor{Any}, input_c_)
+ begin
+ end
+ end
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ output_ = convert(Tensor{Any}, output_)
+ begin
+ end
+ end
+ begin
+ output_h_ = convert(Tensor{Any}, output_h_)
+ begin
+ end
+ end
+ begin
+ output_c_ = convert(Tensor{Any}, output_c_)
+ begin
+ end
+ end
+ begin
+ output_backprop_ = convert(Tensor{Any}, output_backprop_)
+ begin
+ end
+ end
+ begin
+ output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_)
+ begin
+ end
+ end
+ begin
+ output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_)
+ begin
+ end
+ end
+ begin
+ reserve_space_ = convert(Tensor{Any}, reserve_space_)
+ begin
+ end
+ end
+ begin
+ host_reserved_ = convert(Tensor{Any}, host_reserved_)
+ begin
+ end
+ end
+ begin
+ (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, output_)
+ end
+ begin
+ tf.add_input(desc, output_h_)
+ end
+ begin
+ tf.add_input(desc, output_c_)
+ end
+ begin
+ tf.add_input(desc, output_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_h_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_c_backprop_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_)
+ end
+ begin
+ tf.add_input(desc, host_reserved_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("CudnnRNNBackpropV2")
+ input_ = convert(tf.EagerTensor, input_)
+ input_h_ = convert(tf.EagerTensor, input_h_)
+ input_c_ = convert(tf.EagerTensor, input_c_)
+ params_ = convert(tf.EagerTensor, params_)
+ output_ = convert(tf.EagerTensor, output_)
+ output_h_ = convert(tf.EagerTensor, output_h_)
+ output_c_ = convert(tf.EagerTensor, output_c_)
+ output_backprop_ = convert(tf.EagerTensor, output_backprop_)
+ output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_)
+ output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_)
+ reserve_space_ = convert(tf.EagerTensor, reserve_space_)
+ host_reserved_ = convert(tf.EagerTensor, host_reserved_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_h_)
+ end
+ begin
+ tf.add_input(desc, input_c_)
+ end
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, output_)
+ end
+ begin
+ tf.add_input(desc, output_h_)
+ end
+ begin
+ tf.add_input(desc, output_c_)
+ end
+ begin
+ tf.add_input(desc, output_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_h_backprop_)
+ end
+ begin
+ tf.add_input(desc, output_c_backprop_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_)
+ end
+ begin
+ tf.add_input(desc, host_reserved_)
+ end
+ end
+ begin
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_h_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_c_)
+ end
+ begin
+ desc["T"] = tf.data_type(params_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_h_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_c_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_h_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_c_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(reserve_space_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ else
+ cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ requantization_range(input, input_min, input_max)
+
+Computes a range that covers the actual values present in a quantized tensor.
+"""
+begin
+ begin
+ function requantization_range_graph(input_, input_min_, input_max_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RequantizationRange") do
+ desc = tf.NodeDescription("RequantizationRange")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_min_ = convert(Tensor{Float32}, input_min_)
+ begin
+ end
+ end
+ begin
+ input_max_ = convert(Tensor{Float32}, input_max_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function requantization_range_eager(input_, input_min_, input_max_; name=nothing)
+ desc = tf.EagerOp("RequantizationRange")
+ input_ = convert(tf.EagerTensor, input_)
+ input_min_ = convert(tf.EagerTensor, input_min_)
+ input_max_ = convert(tf.EagerTensor, input_max_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tinput"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantization_range(input_, input_min_, input_max_; name=nothing)
+ if tf.in_eager_mode()
+ requantization_range_eager(input_, input_min_, input_max_; name=name)
+ else
+ requantization_range_graph(input_, input_min_, input_max_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ maximum(x, y)
+
+Returns the elementwise maximum of `x` and `y` (with broadcasting).
+"""
+begin
+ begin
+ function maximum_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Maximum") do
+ desc = tf.NodeDescription("Maximum")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function maximum_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Maximum")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(maximum, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function maximum(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ maximum_eager(x_, y_; name=name)
+ else
+ maximum_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
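+
+# Illustrative eager-mode sketch (a hypothetical helper; assumes eager
+# execution is enabled): elementwise maximum of two tensors.
+function _maximum_example()
+    x = tf.constant([1.0, 5.0, 3.0])
+    y = tf.constant([4.0, 2.0, 3.0])
+    maximum(x, y)
+end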
+
+
+"""
+ reshape(tensor, shape)
+
+Reshapes `tensor` to the given `shape`.
+"""
+begin
+ begin
+ function reshape_graph(tensor_, shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Reshape") do
+ desc = tf.NodeDescription("Reshape")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reshape_eager(tensor_, shape_; name=nothing)
+ desc = tf.EagerOp("Reshape")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["Tshape"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reshape(tensor_, shape_; name=nothing)
+ if tf.in_eager_mode()
+ reshape_eager(tensor_, shape_; name=name)
+ else
+ reshape_graph(tensor_, shape_; name=name)
+ end
+ end
+ end
+end
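+
+# Illustrative eager-mode sketch (a hypothetical helper; assumes eager
+# execution is enabled): reshapes a length-12 vector into a 3x4 tensor.
+function _reshape_example()
+    t = tf.constant(collect(1.0:12.0))
+    reshape(t, [3, 4])
+end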
+
+
+"""
+ matrix_solve_ls(matrix, rhs, l2_regularizer; fast=true)
+
+Solves one or more linear least-squares problems.
+"""
+begin
+ begin
+ function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixSolveLs") do
+ desc = tf.NodeDescription("MatrixSolveLs")
+ begin
+ begin
+ matrix_ = convert(Tensor{Any}, matrix_)
+ begin
+ end
+ end
+ begin
+ rhs_ = convert(Tensor{Any}, rhs_)
+ begin
+ end
+ end
+ begin
+ l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_)
+ begin
+ end
+ end
+ begin
+ (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ begin
+ tf.add_input(desc, l2_regularizer_)
+ end
+ end
+ begin
+ begin
+ if fast !== nothing
+ desc["fast"] = Base.Bool(fast)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
+ desc = tf.EagerOp("MatrixSolveLs")
+ matrix_ = convert(tf.EagerTensor, matrix_)
+ rhs_ = convert(tf.EagerTensor, rhs_)
+ l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_)
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ begin
+ tf.add_input(desc, l2_regularizer_)
+ end
+ end
+ begin
+ begin
+ if fast !== nothing
+ desc["fast"] = Base.Bool(fast)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(matrix_)
+ end
+ begin
+ desc["T"] = tf.data_type(rhs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
+ if tf.in_eager_mode()
+ matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast)
+ else
+ matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast)
+ end
+ end
+ end
+end
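+
+# Illustrative sketch (a hypothetical helper, an assumption about intended
+# use): solves the regularized least-squares problem
+# `argmin_x ||A*x - b||^2 + l2 * ||x||^2`.
+function _matrix_solve_ls_example()
+    A = tf.constant(randn(4, 2))
+    b = tf.constant(randn(4, 1))
+    matrix_solve_ls(A, b, 0.0; fast=true)
+end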
+
+
+"""
+ tf_record_dataset(filenames, compression_type, buffer_size)
+
+Creates a dataset that emits the records from one or more TFRecord files.
+"""
+begin
+ begin
+ function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TFRecordDataset") do
+ desc = tf.NodeDescription("TFRecordDataset")
+ begin
+ begin
+ filenames_ = convert(Tensor{String}, filenames_)
+ begin
+ end
+ end
+ begin
+ compression_type_ = convert(Tensor{String}, compression_type_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing)
+ desc = tf.EagerOp("TFRecordDataset")
+ filenames_ = convert(tf.EagerTensor, filenames_)
+ compression_type_ = convert(tf.EagerTensor, compression_type_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing)
+ if tf.in_eager_mode()
+ tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name)
+ else
+ tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name)
+ end
+ end
+ end
+end
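+
+# Illustrative sketch (a hypothetical helper; "data.tfrecord" is a
+# hypothetical path): builds a dataset over the records of a TFRecord file,
+# with no compression and the default buffer size.
+function _tf_record_dataset_example()
+    tf_record_dataset(["data.tfrecord"], "", 0)
+end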
+
+
+"""
+ boosted_trees_example_debug_outputs(tree_ensemble_handle, bucketized_features)
+
+Computes per-example debug outputs (directional feature contributions) for a tree ensemble.
+"""
+begin
+ begin
+ function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do
+ desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_)
+ end
+ end
+ begin
+ begin
+ if num_bucketized_features !== nothing
+ desc["num_bucketized_features"] = Base.Int(num_bucketized_features)
+ end
+ end
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ desc = tf.EagerOp("BoostedTreesExampleDebugOutputs")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+            bucketized_features_ = [convert(tf.EagerTensor, x) for x = bucketized_features_]  # list input: convert elementwise, as in the graph path
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_)
+ end
+ end
+ begin
+ begin
+ if num_bucketized_features !== nothing
+ desc["num_bucketized_features"] = Base.Int(num_bucketized_features)
+ end
+ end
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension)
+ else
+ boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_max_intra_op_parallelism_dataset(input_dataset, max_intra_op_parallelism)
+
+Creates a dataset that overrides the maximum intra-op parallelism of downstream computation.
+"""
+begin
+ begin
+ function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do
+ desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ max_intra_op_parallelism_ = convert(Tensor{Int64}, max_intra_op_parallelism_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, max_intra_op_parallelism_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ max_intra_op_parallelism_ = convert(tf.EagerTensor, max_intra_op_parallelism_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, max_intra_op_parallelism_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ hsv_to_rgb(images)
+
+Converts one or more images from HSV to RGB.
+"""
+begin
+ begin
+ function hsv_to_rgb_graph(images_; name=nothing)
+ local desc
+ tf.with_op_name(name, "HSVToRGB") do
+ desc = tf.NodeDescription("HSVToRGB")
+ begin
+ begin
+ images_ = convert(Tensor{Float32}, images_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function hsv_to_rgb_eager(images_; name=nothing)
+ desc = tf.EagerOp("HSVToRGB")
+ images_ = convert(tf.EagerTensor, images_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hsv_to_rgb(images_; name=nothing)
+ if tf.in_eager_mode()
+ hsv_to_rgb_eager(images_; name=name)
+ else
+ hsv_to_rgb_graph(images_; name=name)
+ end
+ end
+ end
+end
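+
+# Illustrative eager-mode sketch (a hypothetical helper; assumes eager
+# execution is enabled): the last dimension holds the H, S and V channels,
+# each in [0, 1].
+function _hsv_to_rgb_example()
+    hsv = tf.constant(rand(Float32, 8, 8, 3))
+    hsv_to_rgb(hsv)
+end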
+
+
+"""
+ scatter_div(ref, indices, updates; use_locking=false)
+
+Divides a variable reference elementwise by sparse `updates` at the given `indices`.
+"""
+begin
+ begin
+ function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterDiv") do
+ desc = tf.NodeDescription("ScatterDiv")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterDiv")
+ ref_ = convert(tf.EagerTensor, ref_)
+            indices_ = convert(tf.EagerTensor, indices_)
+            # Mirror the graph path's shift from Julia's 1-based indices to
+            # TensorFlow's 0-based convention, which the generator omits here.
+            indices_ = indices_ - 1
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
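+
+# Illustrative sketch (a hypothetical helper, an assumption about intended
+# use; `ref` is assumed to be a rank-1 variable of length 3). Indices are
+# 1-based on the Julia side and shifted to 0-based internally.
+function _scatter_div_example(ref)
+    scatter_div(ref, [1, 3], tf.constant([2.0, 4.0]))
+end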
+
+
+"""
+ decode_wav(contents; desired_channels=-1, desired_samples=-1)
+
+Decodes a 16-bit PCM WAV file into a float tensor of samples and an `Int32` sample rate.
+"""
+begin
+ begin
+ function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeWav") do
+ desc = tf.NodeDescription("DecodeWav")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if desired_channels !== nothing
+ desc["desired_channels"] = Base.Int(desired_channels)
+ end
+ end
+ begin
+ if desired_samples !== nothing
+ desc["desired_samples"] = Base.Int(desired_samples)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing)
+ desc = tf.EagerOp("DecodeWav")
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ begin
+ if desired_channels !== nothing
+ desc["desired_channels"] = Base.Int(desired_channels)
+ end
+ end
+ begin
+ if desired_samples !== nothing
+ desc["desired_samples"] = Base.Int(desired_samples)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing)
+ if tf.in_eager_mode()
+ decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples)
+ else
+ decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples)
+ end
+ end
+ end
+end
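+
+# Illustrative sketch (a hypothetical helper; "speech.wav" is a hypothetical
+# path to a 16-bit PCM WAV file): returns the decoded samples and the
+# sample rate.
+function _decode_wav_example()
+    contents = read("speech.wav", String)
+    audio, sample_rate = decode_wav(tf.constant(contents); desired_channels=1)
+    (audio, sample_rate)
+end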
+
+
+"""
+ log(x)
+
+Computes the natural logarithm of `x` elementwise.
+"""
+begin
+ begin
+ function log_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Log") do
+ desc = tf.NodeDescription("Log")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function log_eager(x_; name=nothing)
+ desc = tf.EagerOp("Log")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(log, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log(x_; name=nothing)
+ if tf.in_eager_mode()
+ log_eager(x_; name=name)
+ else
+ log_graph(x_; name=name)
+ end
+ end
+ end
+end
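+
+# Illustrative eager-mode sketch (a hypothetical helper; assumes eager
+# execution is enabled): natural logarithm, applied elementwise.
+function _log_example()
+    x = tf.constant([1.0, 2.0, 4.0])
+    log(x)
+end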
+
+
+"""
+ save_v2(prefix, tensor_names, shape_and_slices, tensors)
+
+Saves `tensors` in V2 checkpoint format under the given `prefix`.
+"""
+begin
+ begin
+ function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing)
+ local desc
+ tf.with_op_name(name, "SaveV2") do
+ desc = tf.NodeDescription("SaveV2")
+ begin
+ begin
+ prefix_ = convert(Tensor{String}, prefix_)
+ begin
+ end
+ end
+ begin
+ tensor_names_ = convert(Tensor{String}, tensor_names_)
+ begin
+ end
+ end
+ begin
+ shape_and_slices_ = convert(Tensor{String}, shape_and_slices_)
+ begin
+ end
+ end
+ begin
+ tensors_ = [convert(Tensor{Any}, x) for x = tensors_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, prefix_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, shape_and_slices_)
+ end
+ begin
+ tf.add_input(desc, tensors_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing)
+ desc = tf.EagerOp("SaveV2")
+ prefix_ = convert(tf.EagerTensor, prefix_)
+ tensor_names_ = convert(tf.EagerTensor, tensor_names_)
+ shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_)
+            tensors_ = [convert(tf.EagerTensor, x) for x = tensors_]  # list input: convert elementwise, as in the graph path
+ begin
+ begin
+ tf.add_input(desc, prefix_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, shape_and_slices_)
+ end
+ begin
+ tf.add_input(desc, tensors_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing)
+ if tf.in_eager_mode()
+ save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes)
+ else
+ save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes)
+ end
+ end
+ end
+end
+
+
+"""
+ deep_copy(x)
+
+Makes a copy of `x`.
+"""
+begin
+ begin
+ function deep_copy_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DeepCopy") do
+ desc = tf.NodeDescription("DeepCopy")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function deep_copy_eager(x_; name=nothing)
+ desc = tf.EagerOp("DeepCopy")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(deep_copy, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deep_copy(x_; name=nothing)
+ if tf.in_eager_mode()
+ deep_copy_eager(x_; name=name)
+ else
+ deep_copy_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ model_dataset(input_dataset)
+
+Identity transformation that models the performance of the input pipeline.
+"""
+begin
+ begin
+ function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ModelDataset") do
+ desc = tf.NodeDescription("ModelDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ModelDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ parse_sequence_example(serialized, debug_name, context_dense_defaults; Ncontext_sparse=0, Ncontext_dense=0, Nfeature_list_sparse=0, Nfeature_list_dense=0, context_sparse_types=Int64[], Tcontext_dense=Int64[], feature_list_dense_types=Int64[], context_dense_shapes=Int64[], feature_list_sparse_types=Int64[], feature_list_dense_shapes=Int64[])
+
+Transforms a vector of `SequenceExample` protos into typed tensors.
+"""
+begin
+ begin
+ function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ParseSequenceExample") do
+ desc = tf.NodeDescription("ParseSequenceExample")
+ begin
+ begin
+ serialized_ = convert(Tensor{String}, serialized_)
+ begin
+ end
+ end
+ begin
+ debug_name_ = convert(Tensor{String}, debug_name_)
+ begin
+ end
+ end
+ begin
+ context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, debug_name_)
+ end
+ begin
+ tf.add_input(desc, context_dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if feature_list_dense_missing_assumed_empty !== nothing
+ desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty)
+ end
+ end
+ begin
+ if context_sparse_keys !== nothing
+ desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys)
+ end
+ end
+ begin
+ if context_dense_keys !== nothing
+ desc["context_dense_keys"] = map(Base.identity, context_dense_keys)
+ end
+ end
+ begin
+ if feature_list_sparse_keys !== nothing
+ desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys)
+ end
+ end
+ begin
+ if feature_list_dense_keys !== nothing
+ desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys)
+ end
+ end
+ begin
+ if Ncontext_sparse !== nothing
+ desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse)
+ end
+ end
+ begin
+ if Ncontext_dense !== nothing
+ desc["Ncontext_dense"] = Base.Int(Ncontext_dense)
+ end
+ end
+ begin
+ if Nfeature_list_sparse !== nothing
+ desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse)
+ end
+ end
+ begin
+ if Nfeature_list_dense !== nothing
+ desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense)
+ end
+ end
+ begin
+ if context_sparse_types !== nothing
+ desc["context_sparse_types"] = map(Base.identity, context_sparse_types)
+ end
+ end
+ begin
+ if Tcontext_dense !== nothing
+ desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense)
+ end
+ end
+ begin
+ if feature_list_dense_types !== nothing
+ desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types)
+ end
+ end
+ begin
+ if context_dense_shapes !== nothing
+ desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes)
+ end
+ end
+ begin
+ if feature_list_sparse_types !== nothing
+ desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types)
+ end
+ end
+ begin
+ if feature_list_dense_shapes !== nothing
+ desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:9
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
+ desc = tf.EagerOp("ParseSequenceExample")
+ serialized_ = convert(tf.EagerTensor, serialized_)
+ debug_name_ = convert(tf.EagerTensor, debug_name_)
+            context_dense_defaults_ = [convert(tf.EagerTensor, x) for x = context_dense_defaults_]  # list input: convert elementwise, as in the graph path
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, debug_name_)
+ end
+ begin
+ tf.add_input(desc, context_dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if feature_list_dense_missing_assumed_empty !== nothing
+ desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty)
+ end
+ end
+ begin
+ if context_sparse_keys !== nothing
+ desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys)
+ end
+ end
+ begin
+ if context_dense_keys !== nothing
+ desc["context_dense_keys"] = map(Base.identity, context_dense_keys)
+ end
+ end
+ begin
+ if feature_list_sparse_keys !== nothing
+ desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys)
+ end
+ end
+ begin
+ if feature_list_dense_keys !== nothing
+ desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys)
+ end
+ end
+ begin
+ if Ncontext_sparse !== nothing
+ desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse)
+ end
+ end
+ begin
+ if Ncontext_dense !== nothing
+ desc["Ncontext_dense"] = Base.Int(Ncontext_dense)
+ end
+ end
+ begin
+ if Nfeature_list_sparse !== nothing
+ desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse)
+ end
+ end
+ begin
+ if Nfeature_list_dense !== nothing
+ desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense)
+ end
+ end
+ begin
+ if context_sparse_types !== nothing
+ desc["context_sparse_types"] = map(Base.identity, context_sparse_types)
+ end
+ end
+ begin
+ if Tcontext_dense !== nothing
+ desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense)
+ end
+ end
+ begin
+ if feature_list_dense_types !== nothing
+ desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types)
+ end
+ end
+ begin
+ if context_dense_shapes !== nothing
+ desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes)
+ end
+ end
+ begin
+ if feature_list_sparse_types !== nothing
+ desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types)
+ end
+ end
+ begin
+ if feature_list_dense_shapes !== nothing
+ desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parse_sequence_example, [serialized_, debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
+ if tf.in_eager_mode()
+ parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes)
+ else
+ parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ sinh(x)
+
+
+"""
+begin
+ begin
+ function sinh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Sinh") do
+ desc = tf.NodeDescription("Sinh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sinh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Sinh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sinh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sinh(x_; name=nothing)
+ if tf.in_eager_mode()
+ sinh_eager(x_; name=name)
+ else
+ sinh_graph(x_; name=name)
+ end
+ end
+ end
+end
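+
+# Usage sketch (hypothetical values; assumes eager execution was turned on via
+# `enable_eager_execution()`, so `tf.in_eager_mode()` is true):
+#
+#     x = constant([0.0, 1.0, 2.0])
+#     y = sinh(x)   # executes immediately and records a TapeNode for gradients
+#
+# In graph mode the same call instead returns a symbolic `Tensor` wrapping a "Sinh" node.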
+
+
+"""
+ iterator_v2()
+
+
+"""
+begin
+ begin
+ function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorV2") do
+ desc = tf.NodeDescription("IteratorV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_v2_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("IteratorV2")
+ begin
+ end
+ begin
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_write_v2(handle, index, value, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayWriteV2") do
+ desc = tf.NodeDescription("TensorArrayWriteV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayWriteV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name)
+ else
+ tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_element_shape(input_handle)
+
+
+"""
+begin
+ begin
+ function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListElementShape") do
+ desc = tf.NodeDescription("TensorListElementShape")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing)
+ desc = tf.EagerOp("TensorListElementShape")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing)
+ if tf.in_eager_mode()
+ tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type)
+ else
+ tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_size_v2(handle)
+
+
+"""
+begin
+ begin
+ function queue_size_v2_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QueueSizeV2") do
+ desc = tf.NodeDescription("QueueSizeV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_size_v2_eager(handle_; name=nothing)
+ desc = tf.EagerOp("QueueSizeV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_size_v2, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size_v2(handle_; name=nothing)
+ if tf.in_eager_mode()
+ queue_size_v2_eager(handle_; name=name)
+ else
+ queue_size_v2_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ expm1(x)
+
+
+"""
+begin
+ begin
+ function expm1_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Expm1") do
+ desc = tf.NodeDescription("Expm1")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function expm1_eager(x_; name=nothing)
+ desc = tf.EagerOp("Expm1")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(expm1, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expm1(x_; name=nothing)
+ if tf.in_eager_mode()
+ expm1_eager(x_; name=name)
+ else
+ expm1_graph(x_; name=name)
+ end
+ end
+ end
+end
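+
+# Note: "Expm1" computes exp(x) - 1 accurately for small x, avoiding the
+# catastrophic cancellation of evaluating `exp(x) - 1` directly. Sketch
+# (eager mode assumed):
+#
+#     expm1(constant(1e-10))   # ≈ 1e-10, where exp(1e-10) - 1 would lose precision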
+
+
+"""
+ batch_matrix_band_part(input, num_lower, num_upper)
+
+
+"""
+begin
+ begin
+ function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixBandPart") do
+ desc = tf.NodeDescription("BatchMatrixBandPart")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ num_lower_ = convert(Tensor{Int64}, num_lower_)
+ begin
+ end
+ end
+ begin
+ num_upper_ = convert(Tensor{Int64}, num_upper_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, num_lower_)
+ end
+ begin
+ tf.add_input(desc, num_upper_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing)
+ desc = tf.EagerOp("BatchMatrixBandPart")
+ input_ = convert(tf.EagerTensor, input_)
+ num_lower_ = convert(tf.EagerTensor, num_lower_)
+ num_upper_ = convert(tf.EagerTensor, num_upper_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, num_lower_)
+ end
+ begin
+ tf.add_input(desc, num_upper_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name)
+ else
+ batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name)
+ end
+ end
+ end
+end
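+
+# Semantics sketch: for each matrix in the batch, "BatchMatrixBandPart" zeroes
+# everything outside the band of `num_lower` subdiagonals and `num_upper`
+# superdiagonals around the main diagonal; -1 keeps an entire triangle.
+# Hypothetical example (eager mode assumed):
+#
+#     m = constant(ones(2, 4, 4))
+#     batch_matrix_band_part(m, 1, 0)   # keeps only the main and first subdiagonal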
+
+
+"""
+ concatenate_dataset(input_dataset, another_dataset)
+
+
+"""
+begin
+ begin
+ function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ConcatenateDataset") do
+ desc = tf.NodeDescription("ConcatenateDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ another_dataset_ = convert(Tensor{Any}, another_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, another_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ConcatenateDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ another_dataset_ = convert(tf.EagerTensor, another_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, another_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ decode_gif(contents)
+
+
+"""
+begin
+ begin
+ function decode_gif_graph(contents_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeGif") do
+ desc = tf.NodeDescription("DecodeGif")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_gif_eager(contents_; name=nothing)
+ desc = tf.EagerOp("DecodeGif")
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_gif, [contents_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_gif(contents_; name=nothing)
+ if tf.in_eager_mode()
+ decode_gif_eager(contents_; name=name)
+ else
+ decode_gif_graph(contents_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology="", use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[])

+
+Runs replicated computations on a distributed TPU system.
+"""
+begin
+ begin
+ function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing)
+ local desc
+ tf.with_op_name(name, "TPUReplicate") do
+ desc = tf.NodeDescription("TPUReplicate")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ broadcast_inputs_ = [convert(Tensor{Any}, x) for x = broadcast_inputs_]
+ begin
+ end
+ end
+ begin
+ variables_ = [convert(Tensor{Any}, x) for x = variables_]
+ begin
+ end
+ end
+ begin
+ guaranteed_constants_ = [convert(Tensor{Any}, x) for x = guaranteed_constants_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, broadcast_inputs_)
+ end
+ begin
+ tf.add_input(desc, variables_)
+ end
+ begin
+ tf.add_input(desc, guaranteed_constants_)
+ end
+ end
+ begin
+ begin
+ if computation !== nothing
+ desc["computation"] = Base.identity(computation)
+ end
+ end
+ begin
+ if num_replicas !== nothing
+ desc["num_replicas"] = Base.Int(num_replicas)
+ end
+ end
+ begin
+ if num_cores_per_replica !== nothing
+ desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica)
+ end
+ end
+ begin
+ if topology !== nothing
+ desc["topology"] = Base.String(topology)
+ end
+ end
+ begin
+ if use_tpu !== nothing
+ desc["use_tpu"] = Base.Bool(use_tpu)
+ end
+ end
+ begin
+ if device_assignment !== nothing
+ desc["device_assignment"] = map(Base.identity, device_assignment)
+ end
+ end
+ begin
+ if host_compute_core !== nothing
+ desc["host_compute_core"] = map(Base.identity, host_compute_core)
+ end
+ end
+ begin
+ if Tinputs !== nothing
+ desc["Tinputs"] = map(Base.identity, Tinputs)
+ end
+ end
+ begin
+ if Tbroadcast_inputs !== nothing
+ desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs)
+ end
+ end
+ begin
+ if NumVariables !== nothing
+ desc["NumVariables"] = Base.Int(NumVariables)
+ end
+ end
+ begin
+ if Tguaranteed_constants !== nothing
+ desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing)
+ desc = tf.EagerOp("TPUReplicate")
+ # these are list inputs; convert each element, mirroring the graph wrapper
+ inputs_ = [convert(tf.EagerTensor, x) for x = inputs_]
+ broadcast_inputs_ = [convert(tf.EagerTensor, x) for x = broadcast_inputs_]
+ variables_ = [convert(tf.EagerTensor, x) for x = variables_]
+ guaranteed_constants_ = [convert(tf.EagerTensor, x) for x = guaranteed_constants_]
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, broadcast_inputs_)
+ end
+ begin
+ tf.add_input(desc, variables_)
+ end
+ begin
+ tf.add_input(desc, guaranteed_constants_)
+ end
+ end
+ begin
+ begin
+ if computation !== nothing
+ desc["computation"] = Base.identity(computation)
+ end
+ end
+ begin
+ if num_replicas !== nothing
+ desc["num_replicas"] = Base.Int(num_replicas)
+ end
+ end
+ begin
+ if num_cores_per_replica !== nothing
+ desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica)
+ end
+ end
+ begin
+ if topology !== nothing
+ desc["topology"] = Base.String(topology)
+ end
+ end
+ begin
+ if use_tpu !== nothing
+ desc["use_tpu"] = Base.Bool(use_tpu)
+ end
+ end
+ begin
+ if device_assignment !== nothing
+ desc["device_assignment"] = map(Base.identity, device_assignment)
+ end
+ end
+ begin
+ if host_compute_core !== nothing
+ desc["host_compute_core"] = map(Base.identity, host_compute_core)
+ end
+ end
+ begin
+ if Tinputs !== nothing
+ desc["Tinputs"] = map(Base.identity, Tinputs)
+ end
+ end
+ begin
+ if Tbroadcast_inputs !== nothing
+ desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs)
+ end
+ end
+ begin
+ if NumVariables !== nothing
+ desc["NumVariables"] = Base.Int(NumVariables)
+ end
+ end
+ begin
+ if Tguaranteed_constants !== nothing
+ desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing)
+ if tf.in_eager_mode()
+ tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types)
+ else
+ tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_self_adjoint_eig_v2(input; compute_v=true)
+
+
+"""
+begin
+ begin
+ function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing)
+ local desc
+ tf.with_op_name(name, "BatchSelfAdjointEigV2") do
+ desc = tf.NodeDescription("BatchSelfAdjointEigV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_v !== nothing
+ desc["compute_v"] = Base.Bool(compute_v)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function batch_self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing)
+ desc = tf.EagerOp("BatchSelfAdjointEigV2")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_v !== nothing
+ desc["compute_v"] = Base.Bool(compute_v)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing)
+ if tf.in_eager_mode()
+ batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v)
+ else
+ batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v)
+ end
+ end
+ end
+end
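+
+# This op has two outputs, so the graph wrapper returns a vector of two
+# tensors and the eager wrapper returns the full `res`: the eigenvalues come
+# first, then the eigenvectors (only populated when `compute_v=true`). Sketch,
+# assuming a batch of symmetric matrices `m`:
+#
+#     e, v = batch_self_adjoint_eig_v2(m, compute_v=true)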
+
+
+"""
+ shape(input; out_type=Int32)
+
+
+"""
+begin
+ begin
+ function shape_graph(input_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "Shape") do
+ desc = tf.NodeDescription("Shape")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function shape_eager(input_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("Shape")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape(input_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ shape_eager(input_; name=name, out_type=out_type)
+ else
+ shape_graph(input_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
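+
+# Usage sketch (eager mode assumed): "Shape" returns the runtime shape as a
+# rank-1 Int32 tensor by default; pass `out_type=Int64` for very large shapes.
+#
+#     shape(constant(zeros(2, 3)))   # rank-1 Int32 tensor holding the dimensions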
+
+
+"""
+ repeat_dataset(input_dataset, count)
+
+
+"""
+begin
+ begin
+ function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "RepeatDataset") do
+ desc = tf.NodeDescription("RepeatDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ count_ = convert(Tensor{Int64}, count_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("RepeatDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ count_ = convert(tf.EagerTensor, count_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, count_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
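+
+# Sketch: "RepeatDataset" re-emits `input_dataset` `count` times, and a count
+# of -1 (the TensorFlow convention) repeats indefinitely. `output_types` and
+# `output_shapes` must match the input dataset's element structure; `ds` below
+# is a hypothetical scalar-Float32 dataset handle:
+#
+#     repeat_dataset(ds, -1; output_types=[Float32], output_shapes=[[]])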
+
+
+"""
+ reciprocal_grad(y, dy)
+
+
+"""
+begin
+ begin
+ function reciprocal_grad_graph(y_, dy_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReciprocalGrad") do
+ desc = tf.NodeDescription("ReciprocalGrad")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (y_, dy_) = tf.tf_promote(y_, dy_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reciprocal_grad_eager(y_, dy_; name=nothing)
+ desc = tf.EagerOp("ReciprocalGrad")
+ y_ = convert(tf.EagerTensor, y_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal_grad(y_, dy_; name=nothing)
+ if tf.in_eager_mode()
+ reciprocal_grad_eager(y_, dy_; name=name)
+ else
+ reciprocal_grad_graph(y_, dy_; name=name)
+ end
+ end
+ end
+end
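+
+# Math note: for y = 1/x we have dy/dx = -1/x² = -y², so "ReciprocalGrad"
+# computes the backprop value dx = -y² ⋅ dy from the forward output y and the
+# incoming gradient dy:
+#
+#     reciprocal_grad(constant(0.5), constant(1.0))   # ≈ -0.25 (eager mode)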
+
+
+"""
+ crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method="bilinear")
+
+
+"""
+begin
+ begin
+ function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing)
+ local desc
+ tf.with_op_name(name, "CropAndResizeGradBoxes") do
+ desc = tf.NodeDescription("CropAndResizeGradBoxes")
+ begin
+ begin
+ grads_ = convert(Tensor{Float32}, grads_)
+ begin
+ end
+ end
+ begin
+ image_ = convert(Tensor{Any}, image_)
+ begin
+ end
+ end
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ box_ind_ = convert(Tensor{Int32}, box_ind_)
+ begin
+ end
+ end
+ begin
+ (image_,) = tf.tf_promote(image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, image_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, box_ind_)
+ end
+ end
+ begin
+ begin
+ if method !== nothing
+ desc["method"] = Base.String(method)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing)
+ desc = tf.EagerOp("CropAndResizeGradBoxes")
+ grads_ = convert(tf.EagerTensor, grads_)
+ image_ = convert(tf.EagerTensor, image_)
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ box_ind_ = convert(tf.EagerTensor, box_ind_)
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, image_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, box_ind_)
+ end
+ end
+ begin
+ begin
+ if method !== nothing
+ desc["method"] = Base.String(method)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing)
+ if tf.in_eager_mode()
+ crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method)
+ else
+ crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, method=method)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_solve(matrix, rhs; adjoint=false)
+
+
+"""
+begin
+ begin
+ function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixSolve") do
+ desc = tf.NodeDescription("BatchMatrixSolve")
+ begin
+ begin
+ matrix_ = convert(Tensor{Any}, matrix_)
+ begin
+ end
+ end
+ begin
+ rhs_ = convert(Tensor{Any}, rhs_)
+ begin
+ end
+ end
+ begin
+ (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing)
+ desc = tf.EagerOp("BatchMatrixSolve")
+ matrix_ = convert(tf.EagerTensor, matrix_)
+ rhs_ = convert(tf.EagerTensor, rhs_)
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(matrix_)
+ end
+ begin
+ desc["T"] = tf.data_type(rhs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint)
+ else
+ batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint)
+ end
+ end
+ end
+end
+
+
+"""
+ mutable_hash_table_v2(; container="", shared_name="", use_node_name_sharing=false)
+
+
+"""
+begin
+ begin
+ function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "MutableHashTableV2") do
+ desc = tf.NodeDescription("MutableHashTableV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutable_hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ desc = tf.EagerOp("MutableHashTableV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ if tf.in_eager_mode()
+ mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ else
+ mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ exit(data)
+
+
+"""
+begin
+ begin
+ function exit_graph(data_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Exit") do
+ desc = tf.NodeDescription("Exit")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function exit_eager(data_; name=nothing)
+ desc = tf.EagerOp("Exit")
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(exit, [data_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exit(data_; name=nothing)
+ if tf.in_eager_mode()
+ exit_eager(data_; name=name)
+ else
+ exit_graph(data_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ lrn(input; depth_radius=5, bias=1.0, alpha=1.0, beta=0.5)
+
+
+"""
+begin
+ begin
+ function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
+ local desc
+ tf.with_op_name(name, "LRN") do
+ desc = tf.NodeDescription("LRN")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if depth_radius !== nothing
+ desc["depth_radius"] = Base.Int(depth_radius)
+ end
+ end
+ begin
+ if bias !== nothing
+ desc["bias"] = Base.identity(bias)
+ end
+ end
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ begin
+ if beta !== nothing
+ desc["beta"] = Base.identity(beta)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
+ desc = tf.EagerOp("LRN")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if depth_radius !== nothing
+ desc["depth_radius"] = Base.Int(depth_radius)
+ end
+ end
+ begin
+ if bias !== nothing
+ desc["bias"] = Base.identity(bias)
+ end
+ end
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ begin
+ if beta !== nothing
+ desc["beta"] = Base.identity(beta)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
+ if tf.in_eager_mode()
+ lrn_eager(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
+ else
+ lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
+ end
+ end
+ end
+end
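+
+# Math note: "LRN" (local response normalization, Krizhevsky et al. 2012)
+# normalizes each activation over a window of 2 * depth_radius + 1 adjacent
+# channels in the last dimension:
+#
+#     sqr_sum[a, b, c, d] = sum(input[a, b, c, d-depth_radius:d+depth_radius] .^ 2)
+#     output = input ./ (bias .+ alpha .* sqr_sum) .^ beta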
+
+
+"""
+ stateless_if(cond, input)
+
+
+"""
+begin
+ begin
+ function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessIf") do
+ desc = tf.NodeDescription("StatelessIf")
+ begin
+ begin
+ cond_ = convert(Tensor{Any}, cond_)
+ begin
+ end
+ end
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ begin
+ (cond_,) = tf.tf_promote(cond_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, cond_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if then_branch !== nothing
+ desc["then_branch"] = Base.identity(then_branch)
+ end
+ end
+ begin
+ if else_branch !== nothing
+ desc["else_branch"] = Base.identity(else_branch)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
+ desc = tf.EagerOp("StatelessIf")
+ cond_ = convert(tf.EagerTensor, cond_)
+ # `input_` is a list input; convert each element, mirroring the graph wrapper
+ input_ = [convert(tf.EagerTensor, x) for x = input_]
+ begin
+ begin
+ tf.add_input(desc, cond_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if then_branch !== nothing
+ desc["then_branch"] = Base.identity(then_branch)
+ end
+ end
+ begin
+ if else_branch !== nothing
+ desc["else_branch"] = Base.identity(else_branch)
+ end
+ end
+ end
+ begin
+ desc["Tcond"] = tf.data_type(cond_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
+ if tf.in_eager_mode()
+ stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch)
+ else
+ stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_set_item(input_handle, index, item)
+
+
+"""
+begin
+ begin
+ function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListSetItem") do
+ desc = tf.NodeDescription("TensorListSetItem")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ item_ = convert(Tensor{Any}, item_)
+ begin
+ end
+ end
+ begin
+ (item_,) = tf.tf_promote(item_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, item_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_set_item_eager(input_handle_, index_, item_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListSetItem")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ item_ = convert(tf.EagerTensor, item_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, item_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ begin
+ desc["element_dtype"] = tf.data_type(item_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ rsqrt(x)
+
+
+"""
+begin
+ begin
+ function rsqrt_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Rsqrt") do
+ desc = tf.NodeDescription("Rsqrt")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rsqrt_eager(x_; name=nothing)
+ desc = tf.EagerOp("Rsqrt")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rsqrt, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt(x_; name=nothing)
+ if tf.in_eager_mode()
+ rsqrt_eager(x_; name=name)
+ else
+ rsqrt_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ delete_session_tensor(handle)
+
+
+"""
+begin
+ begin
+ function delete_session_tensor_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DeleteSessionTensor") do
+ desc = tf.NodeDescription("DeleteSessionTensor")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function delete_session_tensor_eager(handle_; name=nothing)
+ desc = tf.EagerOp("DeleteSessionTensor")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function delete_session_tensor(handle_; name=nothing)
+ if tf.in_eager_mode()
+ delete_session_tensor_eager(handle_; name=name)
+ else
+ delete_session_tensor_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ one_hot(indices, depth, on_value, off_value; axis=-1)
+
+
+"""
+begin
+ begin
+ function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing)
+ local desc
+ tf.with_op_name(name, "OneHot") do
+ desc = tf.NodeDescription("OneHot")
+ begin
+ begin
+ indices_ = convert(Tensor{Int64}, indices_)
+ begin
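+ # shift from Julia's 1-based indexing to TensorFlow's 0-based convention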
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ depth_ = convert(Tensor{Int32}, depth_)
+ begin
+ end
+ end
+ begin
+ on_value_ = convert(Tensor{Any}, on_value_)
+ begin
+ end
+ end
+ begin
+ off_value_ = convert(Tensor{Any}, off_value_)
+ begin
+ end
+ end
+ begin
+ (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, depth_)
+ end
+ begin
+ tf.add_input(desc, on_value_)
+ end
+ begin
+ tf.add_input(desc, off_value_)
+ end
+ end
+ begin
+ begin
+ if axis !== nothing
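+ # likewise shift the 1-based `axis` argument to TensorFlow's 0-based axis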
+ axis = Base.Int(axis) - 1
+ end
+ end
+ begin
+ if axis !== nothing
+ desc["axis"] = Base.Int(axis)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing)
+ desc = tf.EagerOp("OneHot")
+ indices_ = convert(tf.EagerTensor, indices_)
+ depth_ = convert(tf.EagerTensor, depth_)
+ on_value_ = convert(tf.EagerTensor, on_value_)
+ off_value_ = convert(tf.EagerTensor, off_value_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, depth_)
+ end
+ begin
+ tf.add_input(desc, on_value_)
+ end
+ begin
+ tf.add_input(desc, off_value_)
+ end
+ end
+ begin
+ begin
+ if axis !== nothing
+ axis = Base.Int(axis) - 1
+ end
+ end
+ begin
+ if axis !== nothing
+ desc["axis"] = Base.Int(axis)
+ end
+ end
+ end
+ begin
+ desc["TI"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(on_value_)
+ end
+ begin
+ desc["T"] = tf.data_type(off_value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing)
+ if tf.in_eager_mode()
+ one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis)
+ else
+ one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis)
+ end
+ end
+ end
+end
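+
+# Usage sketch (graph mode, hypothetical values). The graph wrapper shifts
+# `indices` and `axis` from Julia's 1-based convention to TensorFlow's 0-based
+# one, so index 1 selects the first class:
+#
+#     one_hot([1, 3], 3, 1.0, 0.0)   # rows [1 0 0] and [0 0 1]
+#
+# NB: as generated, the eager wrapper above shifts only `axis`, not `indices`.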
+
+
+"""
+ resource_apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyFtrl") do
+ desc = tf.NodeDescription("ResourceApplyFtrl")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyFtrl")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ else
+ resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
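+
+# Update-rule sketch (per the TensorFlow "ApplyFtrl" documentation; all
+# operations elementwise):
+#
+#     accum_new = accum + grad²
+#     linear   += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+#     quadratic = 1 / (accum_new^lr_power * lr) + 2 * l2
+#     var       = |linear| > l1 ? (sign(linear) * l1 - linear) / quadratic : 0
+#     accum     = accum_new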
+
+
+"""
+ sdca_optimizer_v2(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data; adaptive=false)
+
+
+"""
+begin
+ begin
+ function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
+ local desc
+ tf.with_op_name(name, "SdcaOptimizerV2") do
+ desc = tf.NodeDescription("SdcaOptimizerV2")
+ begin
+ begin
+ sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_]
+ begin
+ end
+ end
+ begin
+ sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_]
+ begin
+ end
+ end
+ begin
+ sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_]
+ begin
+ end
+ end
+ begin
+ dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_]
+ begin
+ end
+ end
+ begin
+ example_weights_ = convert(Tensor{Float32}, example_weights_)
+ begin
+ end
+ end
+ begin
+ example_labels_ = convert(Tensor{Float32}, example_labels_)
+ begin
+ end
+ end
+ begin
+ sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_]
+ begin
+ end
+ end
+ begin
+ sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_]
+ begin
+ end
+ end
+ begin
+ dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_]
+ begin
+ end
+ end
+ begin
+ example_state_data_ = convert(Tensor{Float32}, example_state_data_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_example_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_values_)
+ end
+ begin
+ tf.add_input(desc, dense_features_)
+ end
+ begin
+ tf.add_input(desc, example_weights_)
+ end
+ begin
+ tf.add_input(desc, example_labels_)
+ end
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_weights_)
+ end
+ begin
+ tf.add_input(desc, dense_weights_)
+ end
+ begin
+ tf.add_input(desc, example_state_data_)
+ end
+ end
+ begin
+ begin
+ if loss_type !== nothing
+ desc["loss_type"] = Base.String(loss_type)
+ end
+ end
+ begin
+ if adaptive !== nothing
+ desc["adaptive"] = Base.Bool(adaptive)
+ end
+ end
+ begin
+ if num_sparse_features !== nothing
+ desc["num_sparse_features"] = Base.Int(num_sparse_features)
+ end
+ end
+ begin
+ if num_sparse_features_with_values !== nothing
+ desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values)
+ end
+ end
+ begin
+ if num_dense_features !== nothing
+ desc["num_dense_features"] = Base.Int(num_dense_features)
+ end
+ end
+ begin
+ if l1 !== nothing
+ desc["l1"] = Base.identity(l1)
+ end
+ end
+ begin
+ if l2 !== nothing
+ desc["l2"] = Base.identity(l2)
+ end
+ end
+ begin
+ if num_loss_partitions !== nothing
+ desc["num_loss_partitions"] = Base.Int(num_loss_partitions)
+ end
+ end
+ begin
+ if num_inner_iterations !== nothing
+ desc["num_inner_iterations"] = Base.Int(num_inner_iterations)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
+ desc = tf.EagerOp("SdcaOptimizerV2")
+ # list inputs: convert elementwise, mirroring the graph wrapper
+ sparse_example_indices_ = [convert(tf.EagerTensor, x) for x = sparse_example_indices_]
+ sparse_feature_indices_ = [convert(tf.EagerTensor, x) for x = sparse_feature_indices_]
+ sparse_feature_values_ = [convert(tf.EagerTensor, x) for x = sparse_feature_values_]
+ dense_features_ = [convert(tf.EagerTensor, x) for x = dense_features_]
+ example_weights_ = convert(tf.EagerTensor, example_weights_)
+ example_labels_ = convert(tf.EagerTensor, example_labels_)
+ sparse_indices_ = [convert(tf.EagerTensor, x) for x = sparse_indices_]
+ sparse_weights_ = [convert(tf.EagerTensor, x) for x = sparse_weights_]
+ dense_weights_ = [convert(tf.EagerTensor, x) for x = dense_weights_]
+ example_state_data_ = convert(tf.EagerTensor, example_state_data_)
+ begin
+ begin
+ tf.add_input(desc, sparse_example_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_values_)
+ end
+ begin
+ tf.add_input(desc, dense_features_)
+ end
+ begin
+ tf.add_input(desc, example_weights_)
+ end
+ begin
+ tf.add_input(desc, example_labels_)
+ end
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_weights_)
+ end
+ begin
+ tf.add_input(desc, dense_weights_)
+ end
+ begin
+ tf.add_input(desc, example_state_data_)
+ end
+ end
+ begin
+ begin
+ if loss_type !== nothing
+ desc["loss_type"] = Base.String(loss_type)
+ end
+ end
+ begin
+ if adaptive !== nothing
+ desc["adaptive"] = Base.Bool(adaptive)
+ end
+ end
+ begin
+ if num_sparse_features !== nothing
+ desc["num_sparse_features"] = Base.Int(num_sparse_features)
+ end
+ end
+ begin
+ if num_sparse_features_with_values !== nothing
+ desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values)
+ end
+ end
+ begin
+ if num_dense_features !== nothing
+ desc["num_dense_features"] = Base.Int(num_dense_features)
+ end
+ end
+ begin
+ if l1 !== nothing
+ desc["l1"] = Base.identity(l1)
+ end
+ end
+ begin
+ if l2 !== nothing
+ desc["l2"] = Base.identity(l2)
+ end
+ end
+ begin
+ if num_loss_partitions !== nothing
+ desc["num_loss_partitions"] = Base.Int(num_loss_partitions)
+ end
+ end
+ begin
+ if num_inner_iterations !== nothing
+ desc["num_inner_iterations"] = Base.Int(num_inner_iterations)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
+ if tf.in_eager_mode()
+ sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations)
+ else
+ sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_enqueue(handle, components; timeout_ms=-1)
+
+
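+A plain-Julia analogue of the enqueue semantics (illustrative only; the op
+enqueues a tuple of tensors onto a queue resource, and `timeout_ms` bounds
+how long a full queue may block):
+
+```julia
+q = Channel{Int}(2)        # a bounded FIFO, like a capacity-2 queue
+put!(q, 1); put!(q, 2)     # a third put! would block until a take!
+take!(q)                   # => 1 (FIFO order)
+```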
+"""
+begin
+ begin
+ function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueEnqueue") do
+ desc = tf.NodeDescription("QueueEnqueue")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueEnqueue")
+ handle_ = convert(tf.EagerTensor, handle_)
+ components_ = [convert(tf.EagerTensor, x) for x = components_]
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ else
+ queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ ctc_beam_search_decoder(inputs, sequence_length; merge_repeated=true)
+
+
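+A plain-Julia sketch of the `beam_width=1` special case (greedy decoding):
+take the argmax class per frame, merge repeats, and drop the blank label.
+The op generalizes this by tracking the `top_paths` best of `beam_width`
+hypotheses per frame.
+
+```julia
+function greedy_ctc(logits, blank)   # columns of `logits` are time frames
+    path = [argmax(logits[:, j]) for j in 1:size(logits, 2)]
+    decoded = Int[]
+    for (i, c) in enumerate(path)
+        (c == blank || (i > 1 && c == path[i - 1])) && continue
+        push!(decoded, c)
+    end
+    decoded
+end
+greedy_ctc([0.1 0.8 0.1; 0.7 0.1 0.1; 0.2 0.1 0.8], 3)  # => [2, 1]
+```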
+"""
+begin
+ begin
+ function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing)
+ local desc
+ tf.with_op_name(name, "CTCBeamSearchDecoder") do
+ desc = tf.NodeDescription("CTCBeamSearchDecoder")
+ begin
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ sequence_length_ = convert(Tensor{Int32}, sequence_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, sequence_length_)
+ end
+ end
+ begin
+ begin
+ if beam_width !== nothing
+ desc["beam_width"] = Base.Int(beam_width)
+ end
+ end
+ begin
+ if top_paths !== nothing
+ desc["top_paths"] = Base.Int(top_paths)
+ end
+ end
+ begin
+ if merge_repeated !== nothing
+ desc["merge_repeated"] = Base.Bool(merge_repeated)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing)
+ desc = tf.EagerOp("CTCBeamSearchDecoder")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ sequence_length_ = convert(tf.EagerTensor, sequence_length_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, sequence_length_)
+ end
+ end
+ begin
+ begin
+ if beam_width !== nothing
+ desc["beam_width"] = Base.Int(beam_width)
+ end
+ end
+ begin
+ if top_paths !== nothing
+ desc["top_paths"] = Base.Int(top_paths)
+ end
+ end
+ begin
+ if merge_repeated !== nothing
+ desc["merge_repeated"] = Base.Bool(merge_repeated)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing)
+ if tf.in_eager_mode()
+ ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated)
+ else
+ ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated)
+ end
+ end
+ end
+end
+
+
+"""
+ conditional_accumulator(; container="", shared_name="", reduction_type="MEAN")
+
+
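+A plain-Julia analogue of the accumulate-then-average behavior (the op also
+tracks a global step so that stale gradients are dropped):
+
+```julia
+grads = Vector{Float64}[]                  # gradients from several workers
+push!(grads, [1.0, 2.0]); push!(grads, [3.0, 4.0])
+sum(grads) ./ length(grads)                # => [2.0, 3.0] ("MEAN" reduction)
+```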
+"""
+begin
+ begin
+ function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
+ local desc
+ tf.with_op_name(name, "ConditionalAccumulator") do
+ desc = tf.NodeDescription("ConditionalAccumulator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if reduction_type !== nothing
+ desc["reduction_type"] = Base.String(reduction_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
+ desc = tf.EagerOp("ConditionalAccumulator")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if reduction_type !== nothing
+ desc["reduction_type"] = Base.String(reduction_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
+ if tf.in_eager_mode()
+ conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type)
+ else
+ conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type)
+ end
+ end
+ end
+end
+
+
+"""
+ whole_file_reader(; container="", shared_name="")
+
+
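+A plain-Julia analogue: each read from this reader yields a
+(filename, contents) record for the next file in its work queue.
+
+```julia
+path = tempname()
+write(path, "hello")
+record = (path, read(path, String))   # => (path, "hello")
+```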
+"""
+begin
+ begin
+ function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "WholeFileReader") do
+ desc = tf.NodeDescription("WholeFileReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function whole_file_reader_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("WholeFileReader")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ whole_file_reader_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ whole_file_reader_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false)
+
+
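+The update this op applies, restated in plain Julia for a scalar parameter
+(illustrative only; the op mutates `var`, `ms`, and `mom` in place):
+
+```julia
+rho, momentum, lr, eps = 0.9, 0.0, 0.01, 1e-10
+var, ms, mom, grad = 1.0, 0.0, 0.0, 0.5
+ms  = rho * ms + (1 - rho) * grad^2                 # decayed mean of squared grads
+mom = momentum * mom + lr * grad / sqrt(ms + eps)
+var = var - mom                                     # => 0.9684 (4 digits)
+```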
+"""
+begin
+ begin
+ function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyRMSProp") do
+ desc = tf.NodeDescription("ApplyRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(ms_)
+ end
+ begin
+ desc["T"] = tf.data_type(mom_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ adjust_saturation(images, scale)
+
+
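+The op converts RGB to HSV, multiplies the saturation channel by `scale`
+(clamped to [0, 1]), and converts back, preserving hue and value. A
+one-pixel sketch of the saturation step:
+
+```julia
+r, g, b = 0.8, 0.4, 0.4                 # a desaturated red
+mx, mn = max(r, g, b), min(r, g, b)
+s = mx == 0 ? 0.0 : (mx - mn) / mx      # HSV saturation = 0.5
+min(s * 1.5, 1.0)                       # scale = 1.5 gives 0.75
+```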
+"""
+begin
+ begin
+ function adjust_saturation_graph(images_, scale_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AdjustSaturation") do
+ desc = tf.NodeDescription("AdjustSaturation")
+ begin
+ begin
+ images_ = convert(Tensor{Float32}, images_)
+ begin
+ end
+ end
+ begin
+ scale_ = convert(Tensor{Float32}, scale_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function adjust_saturation_eager(images_, scale_; name=nothing)
+ desc = tf.EagerOp("AdjustSaturation")
+ images_ = convert(tf.EagerTensor, images_)
+ scale_ = convert(tf.EagerTensor, scale_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_saturation(images_, scale_; name=nothing)
+ if tf.in_eager_mode()
+ adjust_saturation_eager(images_, scale_; name=name)
+ else
+ adjust_saturation_graph(images_, scale_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ lookup_table_remove_v2(table_handle, keys)
+
+
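+A plain-Julia analogue using a `Dict`; as with the op, removing a key that
+is not present is not an error:
+
+```julia
+table = Dict("a" => 1, "b" => 2)
+delete!(table, "a"); delete!(table, "zzz")   # absent keys are ignored
+table                                        # => Dict("b" => 2)
+```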
+"""
+begin
+ begin
+ function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableRemoveV2") do
+ desc = tf.NodeDescription("LookupTableRemoveV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing)
+ desc = tf.EagerOp("LookupTableRemoveV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_remove_v2(table_handle_, keys_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_remove_v2_eager(table_handle_, keys_; name=name)
+ else
+ lookup_table_remove_v2_graph(table_handle_, keys_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_close(handle; cancel_pending_enqueues=false)
+
+
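+A plain-Julia analogue using a `Channel`: elements enqueued before the close
+still drain, but new enqueues fail (`cancel_pending_enqueues=true`
+additionally aborts enqueues that are already blocked):
+
+```julia
+q = Channel{Int}(2)
+put!(q, 1)
+close(q)
+take!(q)      # => 1; buffered elements survive the close
+isopen(q)     # => false; further put! calls throw
+```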
+"""
+begin
+ begin
+ function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ local desc
+ tf.with_op_name(name, "QueueClose") do
+ desc = tf.NodeDescription("QueueClose")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if cancel_pending_enqueues !== nothing
+ desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ desc = tf.EagerOp("QueueClose")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if cancel_pending_enqueues !== nothing
+ desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ if tf.in_eager_mode()
+ queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+ else
+ queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+ end
+ end
+ end
+end
+
+
+"""
+ prefetch_dataset(input_dataset, buffer_size)
+
+
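+A plain-Julia analogue of prefetching: a producer task runs ahead of the
+consumer through a bounded buffer of `buffer_size` elements.
+
+```julia
+prefetched = Channel{Int}(4)    # buffer_size = 4
+@async begin                    # producer fills the buffer eagerly
+    for x in 1:10
+        put!(prefetched, x)
+    end
+    close(prefetched)
+end
+collect(prefetched)             # consumer drains: [1, 2, ..., 10]
+```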
+"""
+begin
+ begin
+ function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "PrefetchDataset") do
+ desc = tf.NodeDescription("PrefetchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("PrefetchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ map_dataset(input_dataset, other_arguments; use_inter_op_parallelism=true, preserve_cardinality=false)
+
+
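+A plain-Julia analogue: like `Base.Generator`, the resulting dataset applies
+`f` lazily, one element at a time, instead of materializing the mapped
+collection.
+
+```julia
+mapped = Base.Generator(x -> 2x, 1:5)   # nothing is computed yet
+collect(mapped)                         # => [2, 4, 6, 8, 10]
+```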
+"""
+begin
+ begin
+ function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+ local desc
+ tf.with_op_name(name, "MapDataset") do
+ desc = tf.NodeDescription("MapDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+ desc = tf.EagerOp("MapDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = [convert(tf.EagerTensor, x) for x = other_arguments_]
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if use_inter_op_parallelism !== nothing
+ desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+ if tf.in_eager_mode()
+ map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality)
+ else
+ map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_read_v3(handle, index, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayReadV3") do
+ desc = tf.NodeDescription("TensorArrayReadV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("TensorArrayReadV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype)
+ else
+ tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ identity(input)
+
+
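+A minimal graph-mode sketch (assuming the generated wrappers are reachable
+through the exported `Ops` module):
+
+```julia
+using TensorFlow
+sess = Session(Graph())
+x = constant([1, 2, 3])
+run(sess, Ops.identity(x))   # => [1, 2, 3]
+```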
+"""
+begin
+ begin
+ function identity_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Identity") do
+ desc = tf.NodeDescription("Identity")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function identity_eager(input_; name=nothing)
+ desc = tf.EagerOp("Identity")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(identity, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity(input_; name=nothing)
+ if tf.in_eager_mode()
+ identity_eager(input_; name=name)
+ else
+ identity_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ print(input, data; message="", first_n=-1, summarize=3)
+
+
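+A minimal graph-mode sketch (wrapper access through `Ops` is assumed): the
+result evaluates to `input`, and the `data` tensors are logged as a side
+effect.
+
+```julia
+using TensorFlow
+sess = Session(Graph())
+x = constant([1.0, 2.0])
+y = Ops.print(x, [x]; message="x = ", summarize=2)
+run(sess, y)   # => [1.0, 2.0]; the message and data go to the log
+```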
+"""
+begin
+ begin
+ function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
+ local desc
+ tf.with_op_name(name, "Print") do
+ desc = tf.NodeDescription("Print")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ data_ = [convert(Tensor{Any}, x) for x = data_]
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if U !== nothing
+ desc["U"] = map(Base.identity, U)
+ end
+ end
+ begin
+ if message !== nothing
+ desc["message"] = Base.String(message)
+ end
+ end
+ begin
+ if first_n !== nothing
+ desc["first_n"] = Base.Int(first_n)
+ end
+ end
+ begin
+ if summarize !== nothing
+ desc["summarize"] = Base.Int(summarize)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
+ desc = tf.EagerOp("Print")
+ input_ = convert(tf.EagerTensor, input_)
+ data_ = [convert(tf.EagerTensor, x) for x = data_]
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if U !== nothing
+ desc["U"] = map(Base.identity, U)
+ end
+ end
+ begin
+ if message !== nothing
+ desc["message"] = Base.String(message)
+ end
+ end
+ begin
+ if first_n !== nothing
+ desc["first_n"] = Base.Int(first_n)
+ end
+ end
+ begin
+ if summarize !== nothing
+ desc["summarize"] = Base.Int(summarize)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
+ if tf.in_eager_mode()
+ print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize)
+ else
+ print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize)
+ end
+ end
+ end
+end
+
+
+"""
+ collective_bcast_send(input)
+
+
+"""
+begin
+ begin
+ function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "CollectiveBcastSend") do
+ desc = tf.NodeDescription("CollectiveBcastSend")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if group_size !== nothing
+ desc["group_size"] = Base.Int(group_size)
+ end
+ end
+ begin
+ if group_key !== nothing
+ desc["group_key"] = Base.Int(group_key)
+ end
+ end
+ begin
+ if instance_key !== nothing
+ desc["instance_key"] = Base.Int(instance_key)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+ desc = tf.EagerOp("CollectiveBcastSend")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if group_size !== nothing
+ desc["group_size"] = Base.Int(group_size)
+ end
+ end
+ begin
+ if group_key !== nothing
+ desc["group_key"] = Base.Int(group_key)
+ end
+ end
+ begin
+ if instance_key !== nothing
+ desc["instance_key"] = Base.Int(instance_key)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
+ else
+ collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ _list_to_array(input)
+
+Converts a list of tensors to an array of tensors.
+"""
+begin
+ begin
+ function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "_ListToArray") do
+ desc = tf.NodeDescription("_ListToArray")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:N
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing)
+ desc = tf.EagerOp("_ListToArray")
+ input_ = [convert(tf.EagerTensor, x) for x = input_]
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing)
+ if tf.in_eager_mode()
+ _list_to_array_eager(input_; name=name, Tin=Tin, N=N)
+ else
+ _list_to_array_graph(input_; name=name, Tin=Tin, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ neg_train(w_in, w_out, examples, labels, lr)
+
+
+"""
+begin
+ begin
+ function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing)
+ local desc
+ tf.with_op_name(name, "NegTrain") do
+ desc = tf.NodeDescription("NegTrain")
+ begin
+ begin
+ w_in_ = convert(Tensor{Float32}, w_in_)
+ begin
+ end
+ end
+ begin
+ w_out_ = convert(Tensor{Float32}, w_out_)
+ begin
+ end
+ end
+ begin
+ examples_ = convert(Tensor{Int32}, examples_)
+ begin
+ end
+ end
+ begin
+ labels_ = convert(Tensor{Int32}, labels_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Float32}, lr_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, w_in_)
+ end
+ begin
+ tf.add_input(desc, w_out_)
+ end
+ begin
+ tf.add_input(desc, examples_)
+ end
+ begin
+ tf.add_input(desc, labels_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ end
+ begin
+ begin
+ if vocab_count !== nothing
+ desc["vocab_count"] = map(Base.identity, vocab_count)
+ end
+ end
+ begin
+ if num_negative_samples !== nothing
+ desc["num_negative_samples"] = Base.Int(num_negative_samples)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing)
+ desc = tf.EagerOp("NegTrain")
+ w_in_ = convert(tf.EagerTensor, w_in_)
+ w_out_ = convert(tf.EagerTensor, w_out_)
+ examples_ = convert(tf.EagerTensor, examples_)
+ labels_ = convert(tf.EagerTensor, labels_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ begin
+ begin
+ tf.add_input(desc, w_in_)
+ end
+ begin
+ tf.add_input(desc, w_out_)
+ end
+ begin
+ tf.add_input(desc, examples_)
+ end
+ begin
+ tf.add_input(desc, labels_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ end
+ begin
+ begin
+ if vocab_count !== nothing
+ desc["vocab_count"] = map(Base.identity, vocab_count)
+ end
+ end
+ begin
+ if num_negative_samples !== nothing
+ desc["num_negative_samples"] = Base.Int(num_negative_samples)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing)
+ if tf.in_eager_mode()
+ neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples)
+ else
+ neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples)
+ end
+ end
+ end
+end
+
+
+"""
+ merge_v2checkpoints(checkpoint_prefixes, destination_prefix; delete_old_dirs=true)
+
+
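+A hedged call sketch (the prefix strings are hypothetical, and running it
+requires real V2 checkpoint shards on disk):
+
+```julia
+using TensorFlow
+sess = Session(Graph())
+merge = Ops.merge_v2checkpoints(
+    constant(["model-00000-of-00002", "model-00001-of-00002"]),
+    constant("model-merged");
+    delete_old_dirs=true)
+# run(sess, merge) would rewrite the shards under the merged prefix
+```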
+"""
+begin
+ begin
+ function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing)
+ local desc
+ tf.with_op_name(name, "MergeV2Checkpoints") do
+ desc = tf.NodeDescription("MergeV2Checkpoints")
+ begin
+ begin
+ checkpoint_prefixes_ = convert(Tensor{String}, checkpoint_prefixes_)
+ begin
+ end
+ end
+ begin
+ destination_prefix_ = convert(Tensor{String}, destination_prefix_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, checkpoint_prefixes_)
+ end
+ begin
+ tf.add_input(desc, destination_prefix_)
+ end
+ end
+ begin
+ begin
+ if delete_old_dirs !== nothing
+ desc["delete_old_dirs"] = Base.Bool(delete_old_dirs)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing)
+ desc = tf.EagerOp("MergeV2Checkpoints")
+ checkpoint_prefixes_ = convert(tf.EagerTensor, checkpoint_prefixes_)
+ destination_prefix_ = convert(tf.EagerTensor, destination_prefix_)
+ begin
+ begin
+ tf.add_input(desc, checkpoint_prefixes_)
+ end
+ begin
+ tf.add_input(desc, destination_prefix_)
+ end
+ end
+ begin
+ begin
+ if delete_old_dirs !== nothing
+ desc["delete_old_dirs"] = Base.Bool(delete_old_dirs)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing)
+ if tf.in_eager_mode()
+ merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs)
+ else
+ merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs)
+ end
+ end
+ end
+end
+
+
+"""
+ worker_heartbeat(request)
+
+Worker heartbeat op.
+"""
+begin
+ begin
+ function worker_heartbeat_graph(request_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WorkerHeartbeat") do
+ desc = tf.NodeDescription("WorkerHeartbeat")
+ begin
+ begin
+ request_ = convert(Tensor{String}, request_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, request_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function worker_heartbeat_eager(request_; name=nothing)
+ desc = tf.EagerOp("WorkerHeartbeat")
+ request_ = convert(tf.EagerTensor, request_)
+ begin
+ begin
+ tf.add_input(desc, request_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function worker_heartbeat(request_; name=nothing)
+ if tf.in_eager_mode()
+ worker_heartbeat_eager(request_; name=name)
+ else
+ worker_heartbeat_graph(request_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ collective_permute(input, source_target_pairs)
+
+An Op to permute tensors across replicated TPU instances. Each instance supplies its own input.
+"""
+begin
+ begin
+ function collective_permute_graph(input_, source_target_pairs_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CollectivePermute") do
+ desc = tf.NodeDescription("CollectivePermute")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ source_target_pairs_ = convert(Tensor{Int32}, source_target_pairs_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, source_target_pairs_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function collective_permute_eager(input_, source_target_pairs_; name=nothing)
+ desc = tf.EagerOp("CollectivePermute")
+ input_ = convert(tf.EagerTensor, input_)
+ source_target_pairs_ = convert(tf.EagerTensor, source_target_pairs_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, source_target_pairs_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_permute(input_, source_target_pairs_; name=nothing)
+ if tf.in_eager_mode()
+ collective_permute_eager(input_, source_target_pairs_; name=name)
+ else
+ collective_permute_graph(input_, source_target_pairs_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantize_and_dequantize_v3(input, input_min, input_max, num_bits; signed_input=true, range_given=true)
+
+
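+The round trip can be sketched in plain Julia for the symmetric, signed,
+range-given case (a simplification of the op's full range handling):
+
+```julia
+function qdq(x, max_range, num_bits)
+    levels = 2.0^(num_bits - 1) - 1   # 127 levels for num_bits = 8
+    scale  = max_range / levels
+    round(x / scale) * scale          # quantize, then dequantize
+end
+qdq(0.73, 1.0, 8)   # ≈ 0.7323, the nearest representable 8-bit value
+```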
+"""
+begin
+ begin
+ function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizeAndDequantizeV3") do
+ desc = tf.NodeDescription("QuantizeAndDequantizeV3")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_min_ = convert(Tensor{Any}, input_min_)
+ begin
+ end
+ end
+ begin
+ input_max_ = convert(Tensor{Any}, input_max_)
+ begin
+ end
+ end
+ begin
+ num_bits_ = convert(Tensor{Int32}, num_bits_)
+ begin
+ end
+ end
+ begin
+ (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ begin
+ tf.add_input(desc, num_bits_)
+ end
+ end
+ begin
+ begin
+ if signed_input !== nothing
+ desc["signed_input"] = Base.Bool(signed_input)
+ end
+ end
+ begin
+ if range_given !== nothing
+ desc["range_given"] = Base.Bool(range_given)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing)
+ desc = tf.EagerOp("QuantizeAndDequantizeV3")
+ input_ = convert(tf.EagerTensor, input_)
+ input_min_ = convert(tf.EagerTensor, input_min_)
+ input_max_ = convert(tf.EagerTensor, input_max_)
+ num_bits_ = convert(tf.EagerTensor, num_bits_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ begin
+ tf.add_input(desc, num_bits_)
+ end
+ end
+ begin
+ begin
+ if signed_input !== nothing
+ desc["signed_input"] = Base.Bool(signed_input)
+ end
+ end
+ begin
+ if range_given !== nothing
+ desc["range_given"] = Base.Bool(range_given)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_min_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_max_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing)
+ if tf.in_eager_mode()
+ quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given)
+ else
+ quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given)
+ end
+ end
+ end
+end
+
+
+"""
+ hash_table(; container="", shared_name="", use_node_name_sharing=false)
+
+
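+A plain-Julia analogue: an initialize-once mapping with fixed key and value
+types (the op's `key_dtype` and `value_dtype` attrs).
+
+```julia
+table = Dict{String,Int64}("cat" => 0, "dog" => 1)
+get(table, "cat", -1)   # => 0; the -1 plays the role of a lookup default
+```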
+"""
+begin
+ begin
+ function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "HashTable") do
+ desc = tf.NodeDescription("HashTable")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ desc = tf.EagerOp("HashTable")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ if tf.in_eager_mode()
+ hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ else
+ hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ softplus_grad(gradients, features)
+
+
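+Since softplus(x) = log(1 + exp(x)) has derivative sigmoid(x), this op
+computes `gradients .* sigmoid.(features)`. A plain-Julia reference:
+
+```julia
+softplus_grad_ref(g, x) = g .* (1 ./ (1 .+ exp.(-x)))
+softplus_grad_ref([1.0, 1.0], [0.0, 2.0])   # ≈ [0.5, 0.8808]
+```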
+"""
+begin
+ begin
+ function softplus_grad_graph(gradients_, features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SoftplusGrad") do
+ desc = tf.NodeDescription("SoftplusGrad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Any}, gradients_)
+ begin
+ end
+ end
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (gradients_, features_) = tf.tf_promote(gradients_, features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function softplus_grad_eager(gradients_, features_; name=nothing)
+ desc = tf.EagerOp("SoftplusGrad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus_grad(gradients_, features_; name=nothing)
+ if tf.in_eager_mode()
+ softplus_grad_eager(gradients_, features_; name=name)
+ else
+ softplus_grad_graph(gradients_, features_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ fixed_length_record_reader(; header_bytes=0, footer_bytes=0, hop_bytes=0, container="", shared_name="")
+
+
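+A plain-Julia analogue of the record layout: after `header_bytes`, each
+record is simply the next `record_bytes` bytes of the file.
+
+```julia
+bytes = codeunits("HDRaaabbb")
+header, reclen = 3, 3
+[String(bytes[header + (i - 1) * reclen .+ (1:reclen)]) for i in 1:2]  # ["aaa", "bbb"]
+```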
+"""
+begin
+ begin
+ function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "FixedLengthRecordReader") do
+ desc = tf.NodeDescription("FixedLengthRecordReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if header_bytes !== nothing
+ desc["header_bytes"] = Base.Int(header_bytes)
+ end
+ end
+ begin
+ if record_bytes !== nothing
+ desc["record_bytes"] = Base.Int(record_bytes)
+ end
+ end
+ begin
+ if footer_bytes !== nothing
+ desc["footer_bytes"] = Base.Int(footer_bytes)
+ end
+ end
+ begin
+ if hop_bytes !== nothing
+ desc["hop_bytes"] = Base.Int(hop_bytes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fixed_length_record_reader_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("FixedLengthRecordReader")
+ begin
+ end
+ begin
+ begin
+ if header_bytes !== nothing
+ desc["header_bytes"] = Base.Int(header_bytes)
+ end
+ end
+ begin
+ if record_bytes !== nothing
+ desc["record_bytes"] = Base.Int(record_bytes)
+ end
+ end
+ begin
+ if footer_bytes !== nothing
+ desc["footer_bytes"] = Base.Int(footer_bytes)
+ end
+ end
+ begin
+ if hop_bytes !== nothing
+ desc["hop_bytes"] = Base.Int(hop_bytes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name)
+ else
+ fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_scatter_v2(handle, indices, value, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayScatterV2") do
+ desc = tf.NodeDescription("TensorArrayScatterV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayScatterV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=name)
+ else
+ tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ decode_json_example(json_examples)
+
+
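+A hedged call sketch (wrapper access through `Ops` is assumed): each input
+string is a JSON-mapped `Example` proto, and each output element is the
+same record re-serialized as a binary proto.
+
+```julia
+using TensorFlow
+sess = Session(Graph())
+json = constant(["{}"])               # an empty Example, in proto JSON form
+bin  = Ops.decode_json_example(json)
+# run(sess, bin) yields the binary serialization of each record
+```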
+"""
+begin
+ begin
+ function decode_json_example_graph(json_examples_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeJSONExample") do
+ desc = tf.NodeDescription("DecodeJSONExample")
+ begin
+ begin
+ json_examples_ = convert(Tensor{String}, json_examples_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, json_examples_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_json_example_eager(json_examples_; name=nothing)
+ desc = tf.EagerOp("DecodeJSONExample")
+ json_examples_ = convert(tf.EagerTensor, json_examples_)
+ begin
+ begin
+ tf.add_input(desc, json_examples_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_json_example(json_examples_; name=nothing)
+ if tf.in_eager_mode()
+ decode_json_example_eager(json_examples_; name=name)
+ else
+ decode_json_example_graph(json_examples_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=0.0001, data_format="NHWC", is_training=true)
+
+
+"""
+begin
+ begin
+ function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ local desc
+ tf.with_op_name(name, "FusedBatchNormGradV2") do
+ desc = tf.NodeDescription("FusedBatchNormGradV2")
+ begin
+ begin
+ y_backprop_ = convert(Tensor{Any}, y_backprop_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ scale_ = convert(Tensor{Float32}, scale_)
+ begin
+ end
+ end
+ begin
+ reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_)
+ begin
+ end
+ end
+ begin
+ reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_)
+ begin
+ end
+ end
+ begin
+ (reserve_space_1_, reserve_space_2_) = tf.tf_promote(reserve_space_1_, reserve_space_2_)
+ end
+ begin
+ (y_backprop_, x_) = tf.tf_promote(y_backprop_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_backprop_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_1_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_2_)
+ end
+ end
+ begin
+ begin
+ if U !== nothing
+ desc["U"] = Base.identity(U)
+ end
+ end
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ desc = tf.EagerOp("FusedBatchNormGradV2")
+ y_backprop_ = convert(tf.EagerTensor, y_backprop_)
+ x_ = convert(tf.EagerTensor, x_)
+ scale_ = convert(tf.EagerTensor, scale_)
+ reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_)
+ reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_)
+ begin
+ begin
+ tf.add_input(desc, y_backprop_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_1_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_2_)
+ end
+ end
+ begin
+ begin
+ if U !== nothing
+ desc["U"] = Base.identity(U)
+ end
+ end
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(y_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["U"] = tf.data_type(reserve_space_1_)
+ end
+ begin
+ desc["U"] = tf.data_type(reserve_space_2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ if tf.in_eager_mode()
+ fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ else
+ fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ end
+ end
+ end
+end
+
+
+"""
+ _host_cast(x; Truncate=false)
+
+Cast x of type SrcT to y of DstT.
+"""
+begin
+ begin
+ function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+ local desc
+ tf.with_op_name(name, "_HostCast") do
+ desc = tf.NodeDescription("_HostCast")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if SrcT !== nothing
+ desc["SrcT"] = Base.identity(SrcT)
+ end
+ end
+ begin
+ if DstT !== nothing
+ desc["DstT"] = Base.identity(DstT)
+ end
+ end
+ begin
+ if Truncate !== nothing
+ desc["Truncate"] = Base.Bool(Truncate)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+ desc = tf.EagerOp("_HostCast")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if SrcT !== nothing
+ desc["SrcT"] = Base.identity(SrcT)
+ end
+ end
+ begin
+ if DstT !== nothing
+ desc["DstT"] = Base.identity(DstT)
+ end
+ end
+ begin
+ if Truncate !== nothing
+ desc["Truncate"] = Base.Bool(Truncate)
+ end
+ end
+ end
+ begin
+ desc["SrcT"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+ if tf.in_eager_mode()
+ _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+ else
+ _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+ end
+ end
+ end
+end
+
+
+"""
+ tf_record_reader(; container=, shared_name=, compression_type=)
+
+A reader that outputs the records from a TFRecords file.
+"""
+begin
+ begin
+ function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+ local desc
+ tf.with_op_name(name, "TFRecordReader") do
+ desc = tf.NodeDescription("TFRecordReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tf_record_reader_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+ desc = tf.EagerOp("TFRecordReader")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+ if tf.in_eager_mode()
+ tf_record_reader_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type)
+ else
+ tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type)
+ end
+ end
+ end
+end
+
+
+"""
+ while_(input; output_shapes=Int64[])
+
+Repeatedly runs the function body while the function cond returns true; both attributes name functions attached to the graph.
+"""
+begin
+ begin
+ function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "While") do
+ desc = tf.NodeDescription("While")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if cond !== nothing
+ desc["cond"] = Base.identity(cond)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("While")
+ input_ = [convert(tf.EagerTensor, x) for x = input_]  # input is a tensor list, as in the graph version
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if cond !== nothing
+ desc["cond"] = Base.identity(cond)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes)
+ else
+ while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ stateless_multinomial(logits, num_samples, seed; output_dtype=Int64)
+
+Draws deterministic pseudorandom samples from a multinomial distribution; a fixed seed always yields the same draws.
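+
+A minimal eager-mode sketch (assuming the wrapper is reachable as
+Ops.stateless_multinomial, as with the other generated ops):
+
+    using TensorFlow
+    enable_eager_execution()
+    logits = constant(log.([0.5 0.3 0.2]))  # one row per distribution
+    seed = constant(Int64[1, 2])            # a fixed shape-[2] seed
+    Ops.stateless_multinomial(logits, 5, seed)  # 5 draws, repeatable for this seed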
+"""
+begin
+ begin
+ function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessMultinomial") do
+ desc = tf.NodeDescription("StatelessMultinomial")
+ begin
+ begin
+ logits_ = convert(Tensor{Any}, logits_)
+ begin
+ end
+ end
+ begin
+ num_samples_ = convert(Tensor{Int32}, num_samples_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ (logits_,) = tf.tf_promote(logits_)
+ end
+ begin
+ (seed_,) = tf.tf_promote(seed_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ begin
+ tf.add_input(desc, num_samples_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if output_dtype !== nothing
+ desc["output_dtype"] = Base.identity(output_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
+ desc = tf.EagerOp("StatelessMultinomial")
+ logits_ = convert(tf.EagerTensor, logits_)
+ num_samples_ = convert(tf.EagerTensor, num_samples_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ begin
+ tf.add_input(desc, num_samples_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if output_dtype !== nothing
+ desc["output_dtype"] = Base.identity(output_dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(logits_)
+ end
+ begin
+ desc["Tseed"] = tf.data_type(seed_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
+ if tf.in_eager_mode()
+ stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype)
+ else
+ stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_add(ref, indices, updates; use_locking=false)
+
+Adds sparse updates into the rows of ref selected by indices (1-based, shifted internally to TensorFlow's 0-based convention).
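+
+A graph-mode sketch (assuming a Variable can stand in for the mutable ref
+input, as with this package's other scatter wrappers):
+
+    using TensorFlow
+    sess = Session(Graph())
+    v = Variable(zeros(4))
+    run(sess, global_variables_initializer())
+    run(sess, Ops.scatter_add(v, [1, 3], [5.0, 7.0]))  # adds at 1-based rows 1 and 3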
+"""
+begin
+ begin
+ function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterAdd") do
+ desc = tf.NodeDescription("ScatterAdd")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterAdd")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ indices_ = indices_ - 1  # mirror the graph wrapper's 1-based index shift
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ conj(input)
+
+Returns the element-wise complex conjugate of input.
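+
+A minimal eager-mode sketch (assuming complex-valued constants are supported):
+
+    using TensorFlow
+    enable_eager_execution()
+    z = constant([1.0 + 2.0im, 3.0 - 4.0im])
+    Ops.conj(z)  # -> [1.0 - 2.0im, 3.0 + 4.0im]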
+"""
+begin
+ begin
+ function conj_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Conj") do
+ desc = tf.NodeDescription("Conj")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conj_eager(input_; name=nothing)
+ desc = tf.EagerOp("Conj")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conj, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conj(input_; name=nothing)
+ if tf.in_eager_mode()
+ conj_eager(input_; name=name)
+ else
+ conj_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ parallel_dynamic_stitch(indices, data)
+
+Interleaves the values from the data tensors into a single tensor, writing each element to the destination slot given by indices.
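+
+A minimal eager-mode sketch; note the destination slots pass through to
+TensorFlow unshifted, so they are 0-based:
+
+    using TensorFlow
+    enable_eager_execution()
+    indices = [constant(Int32[0, 2]), constant(Int32[1, 3])]
+    data = [constant([10.0, 30.0]), constant([20.0, 40.0])]
+    Ops.parallel_dynamic_stitch(indices, data)  # -> [10.0, 20.0, 30.0, 40.0]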
+"""
+begin
+ begin
+ function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "ParallelDynamicStitch") do
+ desc = tf.NodeDescription("ParallelDynamicStitch")
+ begin
+ begin
+ indices_ = [convert(Tensor{Int32}, x) for x = indices_]
+ begin
+ end
+ end
+ begin
+ data_ = [convert(Tensor{Any}, x) for x = data_]
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing)
+ desc = tf.EagerOp("ParallelDynamicStitch")
+ indices_ = [convert(tf.EagerTensor, x) for x = indices_]  # list inputs, as in the graph version
+ data_ = [convert(tf.EagerTensor, x) for x = data_]
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ parallel_dynamic_stitch_eager(indices_, data_; name=name, N=N)
+ else
+ parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ make_iterator(dataset, iterator)
+
+Makes a new iterator for the given dataset and stores it in iterator.
+"""
+begin
+ begin
+ function make_iterator_graph(dataset_, iterator_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MakeIterator") do
+ desc = tf.NodeDescription("MakeIterator")
+ begin
+ begin
+ dataset_ = convert(Tensor{Any}, dataset_)
+ begin
+ end
+ end
+ begin
+ iterator_ = convert(Tensor{Any}, iterator_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function make_iterator_eager(dataset_, iterator_; name=nothing)
+ desc = tf.EagerOp("MakeIterator")
+ dataset_ = convert(tf.EagerTensor, dataset_)
+ iterator_ = convert(tf.EagerTensor, iterator_)
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function make_iterator(dataset_, iterator_; name=nothing)
+ if tf.in_eager_mode()
+ make_iterator_eager(dataset_, iterator_; name=name)
+ else
+ make_iterator_graph(dataset_, iterator_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ rfft3d(input, fft_length)
+
+Computes the 3-dimensional Fourier transform of a real-valued input over its innermost three dimensions.
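+
+A minimal eager-mode sketch:
+
+    using TensorFlow
+    enable_eager_execution()
+    x = constant(randn(Float32, 4, 4, 4))
+    Ops.rfft3d(x, Int32[4, 4, 4])  # one-sided complex spectrum of the 4x4x4 signal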
+"""
+begin
+ begin
+ function rfft3d_graph(input_, fft_length_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RFFT3D") do
+ desc = tf.NodeDescription("RFFT3D")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ fft_length_ = convert(Tensor{Int32}, fft_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rfft3d_eager(input_, fft_length_; name=nothing)
+ desc = tf.EagerOp("RFFT3D")
+ input_ = convert(tf.EagerTensor, input_)
+ fft_length_ = convert(tf.EagerTensor, fft_length_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft3d(input_, fft_length_; name=nothing)
+ if tf.in_eager_mode()
+ rfft3d_eager(input_, fft_length_; name=name)
+ else
+ rfft3d_graph(input_, fft_length_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_reduce_sum_sparse(input_indices, input_values, input_shape, reduction_axes; keep_dims=false)
+
+Computes the sum of a SparseTensor's elements across reduction_axes, returning the result as another SparseTensor.
+"""
+begin
+ begin
+ function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "SparseReduceSumSparse") do
+ desc = tf.NodeDescription("SparseReduceSumSparse")
+ begin
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_values_ = convert(Tensor{Any}, input_values_)
+ begin
+ end
+ end
+ begin
+ input_shape_ = convert(Tensor{Int64}, input_shape_)
+ begin
+ end
+ end
+ begin
+ reduction_axes_ = convert(Tensor{Int32}, reduction_axes_)
+ begin
+ end
+ end
+ begin
+ (input_values_,) = tf.tf_promote(input_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("SparseReduceSumSparse")
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_values_ = convert(tf.EagerTensor, input_values_)
+ input_shape_ = convert(tf.EagerTensor, input_shape_)
+ reduction_axes_ = convert(tf.EagerTensor, reduction_axes_)
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ else
+ sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
+
+"""
+ _scoped_allocator()
+
+Allocates a mutable tensor that becomes available to appropriately annotated downstream ops as backing store for their output tensors.
+"""
+begin
+ begin
+ function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing)
+ local desc
+ tf.with_op_name(name, "_ScopedAllocator") do
+ desc = tf.NodeDescription("_ScopedAllocator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if sa_name !== nothing
+ desc["sa_name"] = Base.String(sa_name)
+ end
+ end
+ begin
+ if id !== nothing
+ desc["id"] = Base.Int(id)
+ end
+ end
+ begin
+ if expected_call_count !== nothing
+ desc["expected_call_count"] = Base.Int(expected_call_count)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _scoped_allocator_eager(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing)
+ desc = tf.EagerOp("_ScopedAllocator")
+ begin
+ end
+ begin
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if sa_name !== nothing
+ desc["sa_name"] = Base.String(sa_name)
+ end
+ end
+ begin
+ if id !== nothing
+ desc["id"] = Base.Int(id)
+ end
+ end
+ begin
+ if expected_call_count !== nothing
+ desc["expected_call_count"] = Base.Int(expected_call_count)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing)
+ if tf.in_eager_mode()
+ _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count)
+ else
+ _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_adadelta_parameters(parameters, accumulators, updates; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Float32}, updates_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_add(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
+
+Adds two SparseTensor operands, keeping only output values whose magnitude exceeds thresh.
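+
+A minimal eager-mode sketch; the coordinate matrices pass through unshifted,
+so they follow TensorFlow's 0-based convention:
+
+    using TensorFlow
+    enable_eager_execution()
+    a_ind = constant(reshape(Int64[0], 1, 1)); a_val = constant([1.0])  # 1.0 at slot 0
+    b_ind = constant(reshape(Int64[2], 1, 1)); b_val = constant([2.0])  # 2.0 at slot 2
+    shape = constant(Int64[3])
+    inds, vals, shp = Ops.sparse_add(a_ind, a_val, shape, b_ind, b_val, shape, 0.0)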
+"""
+begin
+ begin
+ function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseAdd") do
+ desc = tf.NodeDescription("SparseAdd")
+ begin
+ begin
+ a_indices_ = convert(Tensor{Int64}, a_indices_)
+ begin
+ end
+ end
+ begin
+ a_values_ = convert(Tensor{Any}, a_values_)
+ begin
+ end
+ end
+ begin
+ a_shape_ = convert(Tensor{Int64}, a_shape_)
+ begin
+ end
+ end
+ begin
+ b_indices_ = convert(Tensor{Int64}, b_indices_)
+ begin
+ end
+ end
+ begin
+ b_values_ = convert(Tensor{Any}, b_values_)
+ begin
+ end
+ end
+ begin
+ b_shape_ = convert(Tensor{Int64}, b_shape_)
+ begin
+ end
+ end
+ begin
+ thresh_ = convert(Tensor{Any}, thresh_)
+ begin
+ end
+ end
+ begin
+ (thresh_,) = tf.tf_promote(thresh_)
+ end
+ begin
+ (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, b_values_)
+ end
+ begin
+ tf.add_input(desc, b_shape_)
+ end
+ begin
+ tf.add_input(desc, thresh_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing)
+ desc = tf.EagerOp("SparseAdd")
+ a_indices_ = convert(tf.EagerTensor, a_indices_)
+ a_values_ = convert(tf.EagerTensor, a_values_)
+ a_shape_ = convert(tf.EagerTensor, a_shape_)
+ b_indices_ = convert(tf.EagerTensor, b_indices_)
+ b_values_ = convert(tf.EagerTensor, b_values_)
+ b_shape_ = convert(tf.EagerTensor, b_shape_)
+ thresh_ = convert(tf.EagerTensor, thresh_)
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, b_values_)
+ end
+ begin
+ tf.add_input(desc, b_shape_)
+ end
+ begin
+ tf.add_input(desc, thresh_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_values_)
+ end
+ begin
+ desc["Treal"] = tf.data_type(thresh_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name)
+ else
+ sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ ctc_greedy_decoder(inputs, sequence_length; merge_repeated=false)
+
+Performs greedy (best-path) decoding on the logits given in inputs.
+"""
+begin
+ begin
+ function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing)
+ local desc
+ tf.with_op_name(name, "CTCGreedyDecoder") do
+ desc = tf.NodeDescription("CTCGreedyDecoder")
+ begin
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ sequence_length_ = convert(Tensor{Int32}, sequence_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, sequence_length_)
+ end
+ end
+ begin
+ begin
+ if merge_repeated !== nothing
+ desc["merge_repeated"] = Base.Bool(merge_repeated)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing)
+ desc = tf.EagerOp("CTCGreedyDecoder")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ sequence_length_ = convert(tf.EagerTensor, sequence_length_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, sequence_length_)
+ end
+ end
+ begin
+ begin
+ if merge_repeated !== nothing
+ desc["merge_repeated"] = Base.Bool(merge_repeated)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing)
+ if tf.in_eager_mode()
+ ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated)
+ else
+ ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated)
+ end
+ end
+ end
+end
+
+
+"""
+ immutable_const()
+
+Returns an immutable tensor loaded from the named memory region.
+"""
+begin
+ begin
+ function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing)
+ local desc
+ tf.with_op_name(name, "ImmutableConst") do
+ desc = tf.NodeDescription("ImmutableConst")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if memory_region_name !== nothing
+ desc["memory_region_name"] = Base.String(memory_region_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function immutable_const_eager(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing)
+ desc = tf.EagerOp("ImmutableConst")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if memory_region_name !== nothing
+ desc["memory_region_name"] = Base.String(memory_region_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing)
+ if tf.in_eager_mode()
+ immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name)
+ else
+ immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name)
+ end
+ end
+ end
+end
+
+
+"""
+ consume_mutex_lock(mutex_lock)
+
+Consumes and releases a lock created by a MutexLock op.
+"""
+begin
+ begin
+ function consume_mutex_lock_graph(mutex_lock_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ConsumeMutexLock") do
+ desc = tf.NodeDescription("ConsumeMutexLock")
+ begin
+ begin
+ mutex_lock_ = convert(Tensor{Any}, mutex_lock_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, mutex_lock_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function consume_mutex_lock_eager(mutex_lock_; name=nothing)
+ desc = tf.EagerOp("ConsumeMutexLock")
+ mutex_lock_ = convert(tf.EagerTensor, mutex_lock_)
+ begin
+ begin
+ tf.add_input(desc, mutex_lock_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function consume_mutex_lock(mutex_lock_; name=nothing)
+ if tf.in_eager_mode()
+ consume_mutex_lock_eager(mutex_lock_; name=name)
+ else
+ consume_mutex_lock_graph(mutex_lock_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ greater_equal(x, y)
+
+Returns the truth value of x >= y element-wise, with broadcasting.
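+
+A minimal eager-mode sketch:
+
+    using TensorFlow
+    enable_eager_execution()
+    Ops.greater_equal(constant([1, 2, 3]), constant([2, 2, 2]))  # -> [false, true, true]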
+"""
+begin
+ begin
+ function greater_equal_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GreaterEqual") do
+ desc = tf.NodeDescription("GreaterEqual")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function greater_equal_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("GreaterEqual")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(greater_equal, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater_equal(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ greater_equal_eager(x_, y_; name=name)
+ else
+ greater_equal_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter=)
+
+Initializes a lookup table from a text file, taking keys and values from the columns given by key_index and value_index.
+"""
+begin
+ begin
+ function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ local desc
+ tf.with_op_name(name, "InitializeTableFromTextFileV2") do
+ desc = tf.NodeDescription("InitializeTableFromTextFileV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if key_index !== nothing
+ desc["key_index"] = Base.Int(key_index)
+ end
+ end
+ begin
+ if value_index !== nothing
+ desc["value_index"] = Base.Int(value_index)
+ end
+ end
+ begin
+ if vocab_size !== nothing
+ desc["vocab_size"] = Base.Int(vocab_size)
+ end
+ end
+ begin
+ if delimiter !== nothing
+ desc["delimiter"] = Base.String(delimiter)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ desc = tf.EagerOp("InitializeTableFromTextFileV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ filename_ = convert(tf.EagerTensor, filename_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if key_index !== nothing
+ desc["key_index"] = Base.Int(key_index)
+ end
+ end
+ begin
+ if value_index !== nothing
+ desc["value_index"] = Base.Int(value_index)
+ end
+ end
+ begin
+ if vocab_size !== nothing
+ desc["vocab_size"] = Base.Int(vocab_size)
+ end
+ end
+ begin
+ if delimiter !== nothing
+ desc["delimiter"] = Base.String(delimiter)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ if tf.in_eager_mode()
+ initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
+ else
+ initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_dequeue(handle; timeout_ms=-1)
+
+Dequeues a tuple of one or more tensors from the given queue.
+"""
+begin
+ begin
+ function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueDequeue") do
+ desc = tf.NodeDescription("QueueDequeue")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueDequeue")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ else
+ queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ equal(x, y)
+
+Returns the truth value of x == y element-wise, with broadcasting.
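+
+A minimal eager-mode sketch:
+
+    using TensorFlow
+    enable_eager_execution()
+    Ops.equal(constant([1, 2, 3]), constant([1, 0, 3]))  # -> [true, false, true]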
+"""
+begin
+ begin
+ function equal_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Equal") do
+ desc = tf.NodeDescription("Equal")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function equal_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Equal")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(equal, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function equal(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ equal_eager(x_, y_; name=name)
+ else
+ equal_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ iterator_from_string_handle(string_handle; output_types=Int64[], output_shapes=Int64[])
+
+Converts a string handle, such as one produced by IteratorToStringHandle, back into an iterator resource.
+"""
+begin
+ begin
+ function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorFromStringHandle") do
+ desc = tf.NodeDescription("IteratorFromStringHandle")
+ begin
+ begin
+ string_handle_ = convert(Tensor{String}, string_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, string_handle_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("IteratorFromStringHandle")
+ string_handle_ = convert(tf.EagerTensor, string_handle_)
+ begin
+ begin
+ tf.add_input(desc, string_handle_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_split(tensor, element_shape, lengths)
+
+Splits tensor into a TensorList whose elements have the sizes given by lengths along the first dimension.
+"""
+begin
+ begin
+ function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListSplit") do
+ desc = tf.NodeDescription("TensorListSplit")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ element_shape_ = convert(Tensor{Any}, element_shape_)
+ begin
+ end
+ end
+ begin
+ lengths_ = convert(Tensor{Int64}, lengths_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ begin
+ (element_shape_,) = tf.tf_promote(element_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ desc = tf.EagerOp("TensorListSplit")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ element_shape_ = convert(tf.EagerTensor, element_shape_)
+ lengths_ = convert(tf.EagerTensor, lengths_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ begin
+ desc["element_dtype"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["shape_type"] = tf.data_type(element_shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ if tf.in_eager_mode()
+ tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ else
+ tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ end
+ end
+ end
+end
+
+
+"""
+ fractional_max_pool(value; pseudo_random=false, overlapping=false, deterministic=false, seed=0, seed2=0)
+
+Performs fractional max pooling on the input, with pooling regions determined by pooling_ratio.
+"""
+begin
+ begin
+ function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "FractionalMaxPool") do
+ desc = tf.NodeDescription("FractionalMaxPool")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if pooling_ratio !== nothing
+ desc["pooling_ratio"] = map(Base.identity, pooling_ratio)
+ end
+ end
+ begin
+ if pseudo_random !== nothing
+ desc["pseudo_random"] = Base.Bool(pseudo_random)
+ end
+ end
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ begin
+ if deterministic !== nothing
+ desc["deterministic"] = Base.Bool(deterministic)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("FractionalMaxPool")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if pooling_ratio !== nothing
+ desc["pooling_ratio"] = map(Base.identity, pooling_ratio)
+ end
+ end
+ begin
+ if pseudo_random !== nothing
+ desc["pseudo_random"] = Base.Bool(pseudo_random)
+ end
+ end
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ begin
+ if deterministic !== nothing
+ desc["deterministic"] = Base.Bool(deterministic)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
+ else
+ fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_nd(indices, updates, shape)
+
+Scatters updates into a zero-initialized tensor of the given shape at the given indices (1-based, shifted internally to TensorFlow's 0-based convention).
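+
+A graph-mode sketch; the wrapper shifts indices down by one, so positions are
+1-based on the Julia side:
+
+    using TensorFlow
+    sess = Session(Graph())
+    indices = constant(reshape(Int64[1, 3], 2, 1))  # [nnz, ndims]-shaped positions
+    updates = constant([10.0, 20.0])
+    run(sess, Ops.scatter_nd(indices, updates, constant(Int64[4])))  # -> [10, 0, 20, 0]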
+"""
+begin
+ begin
+ function scatter_nd_graph(indices_, updates_, shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterNd") do
+ desc = tf.NodeDescription("ScatterNd")
+ begin
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_, shape_) = tf.tf_promote(indices_, shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_nd_eager(indices_, updates_, shape_; name=nothing)
+ desc = tf.EagerOp("ScatterNd")
+ indices_ = convert(tf.EagerTensor, indices_)
+ indices_ = indices_ - 1  # mirror the graph wrapper's 1-based index shift
+ updates_ = convert(tf.EagerTensor, updates_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing)
+ if tf.in_eager_mode()
+ scatter_nd_eager(indices_, updates_, shape_; name=name)
+ else
+ scatter_nd_graph(indices_, updates_, shape_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ select(condition, t, e)
+
+Selects elements from t where condition is true and from e where it is false.
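+
+A minimal eager-mode sketch:
+
+    using TensorFlow
+    enable_eager_execution()
+    cond = constant([true, false, true])
+    Ops.select(cond, constant([1, 2, 3]), constant([10, 20, 30]))  # -> [1, 20, 3]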
+"""
+begin
+ begin
+ function select_graph(condition_, t_, e_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Select") do
+ desc = tf.NodeDescription("Select")
+ begin
+ begin
+ condition_ = convert(Tensor{Bool}, condition_)
+ begin
+ end
+ end
+ begin
+ t_ = convert(Tensor{Any}, t_)
+ begin
+ end
+ end
+ begin
+ e_ = convert(Tensor{Any}, e_)
+ begin
+ end
+ end
+ begin
+ (t_, e_) = tf.tf_promote(t_, e_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, condition_)
+ end
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, e_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function select_eager(condition_, t_, e_; name=nothing)
+ desc = tf.EagerOp("Select")
+ condition_ = convert(tf.EagerTensor, condition_)
+ t_ = convert(tf.EagerTensor, t_)
+ e_ = convert(tf.EagerTensor, e_)
+ begin
+ begin
+ tf.add_input(desc, condition_)
+ end
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, e_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(t_)
+ end
+ begin
+ desc["T"] = tf.data_type(e_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(select, [condition_, t_, e_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function select(condition_, t_, e_; name=nothing)
+ if tf.in_eager_mode()
+ select_eager(condition_, t_, e_; name=name)
+ else
+ select_graph(condition_, t_, e_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ min(input, reduction_indices; keep_dims=false)
+
+Computes the minimum of input's elements across the given reduction_indices (1-based, shifted internally).
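+
+A graph-mode sketch (Ops.min is written qualified to avoid clashing with Base.min):
+
+    using TensorFlow
+    sess = Session(Graph())
+    x = constant([3.0, 1.0, 2.0])
+    run(sess, Ops.min(x, 1))  # minimum over (1-based) dimension 1 -> 1.0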
+"""
+begin
+ begin
+ function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Min") do
+ desc = tf.NodeDescription("Min")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function min_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("Min")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ reduction_indices_ = reduction_indices_ - 1  # mirror the graph wrapper's 1-based shift
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function min(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
+
+"""
+ lrn_grad(input_grads, input_image, output_image; depth_radius=5, bias=1, alpha=1, beta=0.5)
+
+
+"""
+begin
+ begin
+ function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
+ local desc
+ tf.with_op_name(name, "LRNGrad") do
+ desc = tf.NodeDescription("LRNGrad")
+ begin
+ begin
+ input_grads_ = convert(Tensor{Float32}, input_grads_)
+ begin
+ end
+ end
+ begin
+ input_image_ = convert(Tensor{Float32}, input_image_)
+ begin
+ end
+ end
+ begin
+ output_image_ = convert(Tensor{Float32}, output_image_)
+ begin
+ end
+ end
+ begin
+ (input_grads_, input_image_, output_image_) = tf.tf_promote(input_grads_, input_image_, output_image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_grads_)
+ end
+ begin
+ tf.add_input(desc, input_image_)
+ end
+ begin
+ tf.add_input(desc, output_image_)
+ end
+ end
+ begin
+ begin
+ if depth_radius !== nothing
+ desc["depth_radius"] = Base.Int(depth_radius)
+ end
+ end
+ begin
+ if bias !== nothing
+ desc["bias"] = Base.identity(bias)
+ end
+ end
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ begin
+ if beta !== nothing
+ desc["beta"] = Base.identity(beta)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
+ desc = tf.EagerOp("LRNGrad")
+ input_grads_ = convert(tf.EagerTensor, input_grads_)
+ input_image_ = convert(tf.EagerTensor, input_image_)
+ output_image_ = convert(tf.EagerTensor, output_image_)
+ begin
+ begin
+ tf.add_input(desc, input_grads_)
+ end
+ begin
+ tf.add_input(desc, input_image_)
+ end
+ begin
+ tf.add_input(desc, output_image_)
+ end
+ end
+ begin
+ begin
+ if depth_radius !== nothing
+ desc["depth_radius"] = Base.Int(depth_radius)
+ end
+ end
+ begin
+ if bias !== nothing
+ desc["bias"] = Base.identity(bias)
+ end
+ end
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ begin
+ if beta !== nothing
+ desc["beta"] = Base.identity(beta)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_grads_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_image_)
+ end
+ begin
+ desc["T"] = tf.data_type(output_image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
+ if tf.in_eager_mode()
+ lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
+ else
+ lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
+ end
+ end
+ end
+end
+
+
+"""
+ random_poisson_v2(shape, rate; seed=0, seed2=0, R=Float64, dtype=Int64)
+
+
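+Example (a minimal eager-mode sketch; `shape` gives the number of
+independent draws per rate):
+
+```julia
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+Ops.random_poisson_v2(constant(Int32[3]), constant(4.0); seed=42)
+```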
+"""
+begin
+ begin
+ function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "RandomPoissonV2") do
+ desc = tf.NodeDescription("RandomPoissonV2")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ rate_ = convert(Tensor{Float64}, rate_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (rate_,) = tf.tf_promote(rate_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, rate_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ begin
+ if R !== nothing
+ desc["R"] = Base.identity(R)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing)
+ desc = tf.EagerOp("RandomPoissonV2")
+ shape_ = convert(tf.EagerTensor, shape_)
+ rate_ = convert(tf.EagerTensor, rate_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, rate_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ begin
+ if R !== nothing
+ desc["R"] = Base.identity(R)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["S"] = tf.data_type(shape_)
+ end
+ begin
+ desc["R"] = tf.data_type(rate_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype)
+ else
+ random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ fifo_queue(; shapes=Int64[], capacity=-1, container="", shared_name="")
+
+
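+Example (a graph-mode sketch; queues are stateful graph resources, so this
+only constructs the queue node):
+
+```julia
+using TensorFlow
+q = Ops.fifo_queue(component_types=[Float32], capacity=10, shared_name="q1")
+```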
+"""
+begin
+ begin
+ function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "FIFOQueue") do
+ desc = tf.NodeDescription("FIFOQueue")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("FIFOQueue")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices; use_locking=false)
+
+
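+For reference, the update applied to the rows of `var` selected by
+`indices`, as documented for TensorFlow's proximal gradient descent ops:
+
+    prox_v = var - alpha * grad
+    var = sign(prox_v) / (1 + alpha * l2) * max(|prox_v| - alpha * l1, 0)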
+"""
+begin
+ begin
+ function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do
+ desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (alpha_, l1_, l2_, grad_) = tf.tf_promote(alpha_, l1_, l2_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent")
+ var_ = convert(tf.EagerTensor, var_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_non_serializable_dataset(input_dataset)
+
+
+"""
+begin
+ begin
+ function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalNonSerializableDataset") do
+ desc = tf.NodeDescription("ExperimentalNonSerializableDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalNonSerializableDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_non_serializable_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ dilation2d_backprop_filter(input, filter, out_backprop)
+
+
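+Example (a minimal eager-mode sketch with shapes chosen so a 2x2 filter
+over a 5x5 input under `VALID` padding yields a 4x4 output):
+
+```julia
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+x  = constant(randn(Float32, 1, 5, 5, 1))  # NHWC input
+w  = constant(randn(Float32, 2, 2, 1))     # filter
+dy = constant(randn(Float32, 1, 4, 4, 1))  # upstream gradient
+Ops.dilation2d_backprop_filter(x, w, dy;
+    strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding="VALID")
+```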
+"""
+begin
+ begin
+ function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "Dilation2DBackpropFilter") do
+ desc = tf.NodeDescription("Dilation2DBackpropFilter")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ desc = tf.EagerOp("Dilation2DBackpropFilter")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding)
+ else
+ dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_bytes_produced_stats_dataset(input_dataset, tag)
+
+
+"""
+begin
+ begin
+ function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do
+ desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ _if(cond, input)
+
+output = cond ? then_branch(input) : else_branch(input)
+"""
+begin
+ begin
+ function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
+ local desc
+ tf.with_op_name(name, "_If") do
+ desc = tf.NodeDescription("_If")
+ begin
+ begin
+ cond_ = convert(Tensor{Any}, cond_)
+ begin
+ end
+ end
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ begin
+ (cond_,) = tf.tf_promote(cond_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, cond_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if then_branch !== nothing
+ desc["then_branch"] = Base.identity(then_branch)
+ end
+ end
+ begin
+ if else_branch !== nothing
+ desc["else_branch"] = Base.identity(else_branch)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
+ desc = tf.EagerOp("_If")
+ cond_ = convert(tf.EagerTensor, cond_)
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, cond_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if then_branch !== nothing
+ desc["then_branch"] = Base.identity(then_branch)
+ end
+ end
+ begin
+ if else_branch !== nothing
+ desc["else_branch"] = Base.identity(else_branch)
+ end
+ end
+ end
+ begin
+ desc["Tcond"] = tf.data_type(cond_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
+ if tf.in_eager_mode()
+ _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch)
+ else
+ _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch)
+ end
+ end
+ end
+end
+
+
+"""
+ bias_add_grad(out_backprop; data_format="NHWC")
+
+
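+Example (a minimal eager-mode sketch; with the default `NHWC` layout the
+op sums the upstream gradient over every dimension except the last):
+
+```julia
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+dy = constant(randn(Float32, 2, 3))  # gradient for a bias of length 3
+Ops.bias_add_grad(dy)
+```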
+"""
+begin
+ begin
+ function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "BiasAddGrad") do
+ desc = tf.NodeDescription("BiasAddGrad")
+ begin
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (out_backprop_,) = tf.tf_promote(out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing)
+ desc = tf.EagerOp("BiasAddGrad")
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_grad(out_backprop_; name=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ bias_add_grad_eager(out_backprop_; name=name, data_format=data_format)
+ else
+ bias_add_grad_graph(out_backprop_; name=name, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_serialize_state_v2(reader_handle)
+
+
+"""
+begin
+ begin
+ function reader_serialize_state_v2_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderSerializeStateV2") do
+ desc = tf.NodeDescription("ReaderSerializeStateV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_serialize_state_v2_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderSerializeStateV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state_v2(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_serialize_state_v2_eager(reader_handle_; name=name)
+ else
+ reader_serialize_state_v2_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ wrap_dataset_variant(input_handle)
+
+
+"""
+begin
+ begin
+ function wrap_dataset_variant_graph(input_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WrapDatasetVariant") do
+ desc = tf.NodeDescription("WrapDatasetVariant")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function wrap_dataset_variant_eager(input_handle_; name=nothing)
+ desc = tf.EagerOp("WrapDatasetVariant")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function wrap_dataset_variant(input_handle_; name=nothing)
+ if tf.in_eager_mode()
+ wrap_dataset_variant_eager(input_handle_; name=name)
+ else
+ wrap_dataset_variant_graph(input_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ parallel_interleave_dataset_v2(input_dataset, other_arguments, cycle_length, block_length, num_parallel_calls; sloppy=false)
+
+
+"""
+begin
+ begin
+ function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+ local desc
+ tf.with_op_name(name, "ParallelInterleaveDatasetV2") do
+ desc = tf.NodeDescription("ParallelInterleaveDatasetV2")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ begin
+ cycle_length_ = convert(Tensor{Int64}, cycle_length_)
+ begin
+ end
+ end
+ begin
+ block_length_ = convert(Tensor{Int64}, block_length_)
+ begin
+ end
+ end
+ begin
+ num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, cycle_length_)
+ end
+ begin
+ tf.add_input(desc, block_length_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if sloppy !== nothing
+ desc["sloppy"] = Base.Bool(sloppy)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+ desc = tf.EagerOp("ParallelInterleaveDatasetV2")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ cycle_length_ = convert(tf.EagerTensor, cycle_length_)
+ block_length_ = convert(tf.EagerTensor, block_length_)
+ num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, cycle_length_)
+ end
+ begin
+ tf.add_input(desc, block_length_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if sloppy !== nothing
+ desc["sloppy"] = Base.Bool(sloppy)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+ if tf.in_eager_mode()
+ parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy)
+ else
+ parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy)
+ end
+ end
+ end
+end
+
+
+"""
+ depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format="NHWC", dilations=[1, 1, 1, 1])
+
+
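+Example (a minimal eager-mode sketch; `input_sizes` is the shape of the
+forward-pass input whose gradient is requested):
+
+```julia
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+sizes = constant(Int32[1, 5, 5, 2])        # NHWC input shape
+w  = constant(randn(Float32, 2, 2, 2, 1))  # [h, w, in_channels, multiplier]
+dy = constant(randn(Float32, 1, 4, 4, 2))  # upstream gradient
+Ops.depthwise_conv2d_native_backprop_input(sizes, w, dy;
+    strides=[1, 1, 1, 1], padding="VALID")
+```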
+"""
+begin
+ begin
+ function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do
+ desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput")
+ begin
+ begin
+ input_sizes_ = convert(Tensor{Int32}, input_sizes_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_sizes_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput")
+ input_sizes_ = convert(tf.EagerTensor, input_sizes_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_sizes_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false)
+
+
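+For reference, the update rule as documented for TensorFlow's RMSProp ops:
+
+    ms  <- rho * ms + (1 - rho) * grad * grad
+    mom <- momentum * mom + lr * grad / sqrt(ms + epsilon)
+    var <- var - mom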
+"""
+begin
+ begin
+ function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyRMSProp") do
+ desc = tf.NodeDescription("ResourceApplyRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_lmdb_dataset(filenames)
+
+
+"""
+begin
+ begin
+ function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalLMDBDataset") do
+ desc = tf.NodeDescription("ExperimentalLMDBDataset")
+ begin
+ begin
+ filenames_ = convert(Tensor{String}, filenames_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalLMDBDataset")
+ filenames_ = convert(tf.EagerTensor, filenames_)
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_accumulator_take_gradient(handle, num_required)
+
+
+"""
+begin
+ begin
+ function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "SparseAccumulatorTakeGradient") do
+ desc = tf.NodeDescription("SparseAccumulatorTakeGradient")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ num_required_ = convert(Tensor{Int32}, num_required_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, num_required_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("SparseAccumulatorTakeGradient")
+ handle_ = convert(tf.EagerTensor, handle_)
+ num_required_ = convert(tf.EagerTensor, num_required_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, num_required_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype)
+ else
+ sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ stack_close_v2(handle)
+
+
+"""
+begin
+ begin
+ function stack_close_v2_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "StackCloseV2") do
+ desc = tf.NodeDescription("StackCloseV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_close_v2_eager(handle_; name=nothing)
+ desc = tf.EagerOp("StackCloseV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack_close_v2, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close_v2(handle_; name=nothing)
+ if tf.in_eager_mode()
+ stack_close_v2_eager(handle_; name=name)
+ else
+ stack_close_v2_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ map_size(; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapSize") do
+ desc = tf.NodeDescription("MapSize")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapSize")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAdagradDA") do
+ desc = tf.NodeDescription("ResourceApplyAdagradDA")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_)
+ begin
+ end
+ end
+ begin
+ gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ global_step_ = convert(Tensor{Int64}, global_step_)
+ begin
+ end
+ end
+ begin
+ (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyAdagradDA")
+ var_ = convert(tf.EagerTensor, var_)
+ gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_)
+ gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ global_step_ = convert(tf.EagerTensor, global_step_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ else
+ resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_forest_tree_size(tree_handle)
+
+
+"""
+begin
+ begin
+ function tensor_forest_tree_size_graph(tree_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestTreeSize") do
+ desc = tf.NodeDescription("TensorForestTreeSize")
+ begin
+ begin
+ tree_handle_ = convert(Tensor{Any}, tree_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_forest_tree_size_eager(tree_handle_; name=nothing)
+ desc = tf.EagerOp("TensorForestTreeSize")
+ tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_size(tree_handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_tree_size_eager(tree_handle_; name=name)
+ else
+ tensor_forest_tree_size_graph(tree_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ matrix_diag_part(input)
+
+
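+Example (a minimal eager-mode sketch):
+
+```julia
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+x = constant([1.0 2.0; 3.0 4.0])
+Ops.matrix_diag_part(x)  # diagonal of each innermost matrix: [1.0, 4.0]
+```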
+"""
+begin
+ begin
+ function matrix_diag_part_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixDiagPart") do
+ desc = tf.NodeDescription("MatrixDiagPart")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_diag_part_eager(input_; name=nothing)
+ desc = tf.EagerOp("MatrixDiagPart")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_diag_part, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag_part(input_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_diag_part_eager(input_; name=name)
+ else
+ matrix_diag_part_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_num_work_units_completed_v2(reader_handle)
+
+
+"""
+begin
+ begin
+ function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do
+ desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed_v2(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_num_work_units_completed_v2_eager(reader_handle_; name=name)
+ else
+ reader_num_work_units_completed_v2_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_split_v3(handle, value, lengths, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArraySplitV3") do
+ desc = tf.NodeDescription("TensorArraySplitV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ lengths_ = convert(Tensor{Int64}, lengths_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArraySplitV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ value_ = convert(tf.EagerTensor, value_)
+ lengths_ = convert(tf.EagerTensor, lengths_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name)
+ else
+ tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value; validate_indices=true)
+
+
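+Example (a graph-mode sketch; the wrapper shifts `sparse_indices` from
+Julia's 1-based convention to TensorFlow's 0-based one):
+
+```julia
+using TensorFlow
+sess = Session()
+d = Ops.sparse_to_dense(constant([1, 3]), constant([4]),
+                        constant([5.0, 6.0]), constant(0.0))
+run(sess, d)  # -> [5.0, 0.0, 6.0, 0.0]
+```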
+"""
+begin
+ begin
+ function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing)
+ local desc
+ tf.with_op_name(name, "SparseToDense") do
+ desc = tf.NodeDescription("SparseToDense")
+ begin
+ begin
+ sparse_indices_ = convert(Tensor{Any}, sparse_indices_)
+ begin
+ sparse_indices_ = sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1)
+ end
+ end
+ begin
+ output_shape_ = convert(Tensor{Any}, output_shape_)
+ begin
+ # output_shape is a size, not a set of indices, so it is passed through without the 1-based index adjustment
+ end
+ end
+ begin
+ sparse_values_ = convert(Tensor{Any}, sparse_values_)
+ begin
+ end
+ end
+ begin
+ default_value_ = convert(Tensor{Any}, default_value_)
+ begin
+ end
+ end
+ begin
+ (sparse_values_, default_value_) = tf.tf_promote(sparse_values_, default_value_)
+ end
+ begin
+ (sparse_indices_, output_shape_) = tf.tf_promote(sparse_indices_, output_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, output_shape_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing)
+ desc = tf.EagerOp("SparseToDense")
+ sparse_indices_ = convert(tf.EagerTensor, sparse_indices_)
+ output_shape_ = convert(tf.EagerTensor, output_shape_)
+ sparse_values_ = convert(tf.EagerTensor, sparse_values_)
+ default_value_ = convert(tf.EagerTensor, default_value_)
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, output_shape_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(sparse_indices_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(output_shape_)
+ end
+ begin
+ desc["T"] = tf.data_type(sparse_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(default_value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing)
+ if tf.in_eager_mode()
+ sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices)
+ else
+ sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices)
+ end
+ end
+ end
+end
+
+
+"""
+ tpu_replicated_input(inputs)
+
+Operator that connects N unreplicated inputs to an N-way replicated TPU computation.
+"""
+begin
+ begin
+ function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "TPUReplicatedInput") do
+ desc = tf.NodeDescription("TPUReplicatedInput")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ (inputs_,) = tf.tf_promote(inputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tpu_replicated_input_eager(inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("TPUReplicatedInput")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(inputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_input(inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ tpu_replicated_input_eager(inputs_; name=name, N=N)
+ else
+ tpu_replicated_input_graph(inputs_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ stack_close(handle)
+
+
+"""
+begin
+ begin
+ function stack_close_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "StackClose") do
+ desc = tf.NodeDescription("StackClose")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_close_eager(handle_; name=nothing)
+ desc = tf.EagerOp("StackClose")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack_close, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close(handle_; name=nothing)
+ if tf.in_eager_mode()
+ stack_close_eager(handle_; name=name)
+ else
+ stack_close_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ deserialize_many_sparse(serialized_sparse)
+
+
+"""
+begin
+ begin
+ function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "DeserializeManySparse") do
+ desc = tf.NodeDescription("DeserializeManySparse")
+ begin
+ begin
+ serialized_sparse_ = convert(Tensor{String}, serialized_sparse_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_sparse_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("DeserializeManySparse")
+ serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_)
+ begin
+ begin
+ tf.add_input(desc, serialized_sparse_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype)
+ else
+ deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ _nccl_reduce_recv(input)
+
+Replacement node for NcclReduce.
+"""
+begin
+ begin
+ function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "_NcclReduceRecv") do
+ desc = tf.NodeDescription("_NcclReduceRecv")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ desc = tf.EagerOp("_NcclReduceRecv")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if reduction !== nothing
+ desc["reduction"] = Base.String(reduction)
+ end
+ end
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ _nccl_reduce_recv_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
+ else
+ _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ mirror_pad_grad(input, paddings)
+
+
+"""
+begin
+ begin
+ function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing)
+ local desc
+ tf.with_op_name(name, "MirrorPadGrad") do
+ desc = tf.NodeDescription("MirrorPadGrad")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (paddings_,) = tf.tf_promote(paddings_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing)
+ desc = tf.EagerOp("MirrorPadGrad")
+ input_ = convert(tf.EagerTensor, input_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tpaddings"] = tf.data_type(paddings_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing)
+ if tf.in_eager_mode()
+ mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode)
+ else
+ mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode)
+ end
+ end
+ end
+end
+
+
+"""
+ broadcast_args(s0, s1)
+
+
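+# Examples
+
+A minimal eager-mode sketch (the `Ops.` prefix is an assumption about where
+this generated wrapper is reachable from):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+s0 = constant(Int32[3, 1])
+s1 = constant(Int32[1, 4])
+# BroadcastArgs returns the shape produced by broadcasting s0 against s1:
+Ops.broadcast_args(s0, s1)  # expected: Int32[3, 4]
+```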
+"""
+begin
+ begin
+ function broadcast_args_graph(s0_, s1_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BroadcastArgs") do
+ desc = tf.NodeDescription("BroadcastArgs")
+ begin
+ begin
+ s0_ = convert(Tensor{Int32}, s0_)
+ begin
+ end
+ end
+ begin
+ s1_ = convert(Tensor{Int32}, s1_)
+ begin
+ end
+ end
+ begin
+ (s0_, s1_) = tf.tf_promote(s0_, s1_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, s0_)
+ end
+ begin
+ tf.add_input(desc, s1_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function broadcast_args_eager(s0_, s1_; name=nothing)
+ desc = tf.EagerOp("BroadcastArgs")
+ s0_ = convert(tf.EagerTensor, s0_)
+ s1_ = convert(tf.EagerTensor, s1_)
+ begin
+ begin
+ tf.add_input(desc, s0_)
+ end
+ begin
+ tf.add_input(desc, s1_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(s0_)
+ end
+ begin
+ desc["T"] = tf.data_type(s1_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_args(s0_, s1_; name=nothing)
+ if tf.in_eager_mode()
+ broadcast_args_eager(s0_, s1_; name=name)
+ else
+ broadcast_args_graph(s0_, s1_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ stateless_truncated_normal(shape, seed; dtype=Float32)
+
+
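+# Examples
+
+A minimal eager-mode sketch; the two-element `Int64` seed follows TF's
+stateless-RNG convention (an assumption, not enforced by this wrapper):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+shape = constant(Int32[2, 2])
+seed = constant(Int64[1, 42])
+# The same shape and seed always yield the same truncated-normal draw:
+Ops.stateless_truncated_normal(shape, seed)
+```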
+"""
+begin
+ begin
+ function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessTruncatedNormal") do
+ desc = tf.NodeDescription("StatelessTruncatedNormal")
+ begin
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (seed_,) = tf.tf_promote(seed_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("StatelessTruncatedNormal")
+ shape_ = convert(tf.EagerTensor, shape_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ begin
+ desc["Tseed"] = tf.data_type(seed_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype)
+ else
+ stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ regex_full_match(input, pattern)
+
+
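+# Examples
+
+A minimal eager-mode sketch (`Ops.` prefix assumed):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+inputs = constant(["TensorFlow", "tensorflow"])
+pattern = constant("[a-z]+")
+# True wherever the whole string matches the RE2 pattern:
+Ops.regex_full_match(inputs, pattern)  # expected: Bool[false, true]
+```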
+"""
+begin
+ begin
+ function regex_full_match_graph(input_, pattern_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RegexFullMatch") do
+ desc = tf.NodeDescription("RegexFullMatch")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ begin
+ pattern_ = convert(Tensor{String}, pattern_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, pattern_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function regex_full_match_eager(input_, pattern_; name=nothing)
+ desc = tf.EagerOp("RegexFullMatch")
+ input_ = convert(tf.EagerTensor, input_)
+ pattern_ = convert(tf.EagerTensor, pattern_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, pattern_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_full_match(input_, pattern_; name=nothing)
+ if tf.in_eager_mode()
+ regex_full_match_eager(input_, pattern_; name=name)
+ else
+ regex_full_match_graph(input_, pattern_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ unwrap_dataset_variant(input_handle)
+
+
+"""
+begin
+ begin
+ function unwrap_dataset_variant_graph(input_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnwrapDatasetVariant") do
+ desc = tf.NodeDescription("UnwrapDatasetVariant")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unwrap_dataset_variant_eager(input_handle_; name=nothing)
+ desc = tf.EagerOp("UnwrapDatasetVariant")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unwrap_dataset_variant(input_handle_; name=nothing)
+ if tf.in_eager_mode()
+ unwrap_dataset_variant_eager(input_handle_; name=name)
+ else
+ unwrap_dataset_variant_graph(input_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ empty(shape; init=false)
+
+
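+# Examples
+
+A minimal eager-mode sketch; passing a Julia type for `dtype` is assumed to
+be accepted by the attribute conversion:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+shape = constant(Int32[2, 3])
+# With init=true the returned 2×3 Float32 tensor is zero-filled;
+# with init=false its contents are left uninitialized.
+Ops.empty(shape, dtype=Float32, init=true)
+```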
+"""
+begin
+ begin
+ function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing)
+ local desc
+ tf.with_op_name(name, "Empty") do
+ desc = tf.NodeDescription("Empty")
+ begin
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if init !== nothing
+ desc["init"] = Base.Bool(init)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing)
+ desc = tf.EagerOp("Empty")
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if init !== nothing
+ desc["init"] = Base.Bool(init)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty(shape_; name=nothing, dtype=nothing, init=nothing)
+ if tf.in_eager_mode()
+ empty_eager(shape_; name=name, dtype=dtype, init=init)
+ else
+ empty_graph(shape_; name=name, dtype=dtype, init=init)
+ end
+ end
+ end
+end
+
+
+"""
+ outfeed_dequeue_tuple(; device_ordinal=-1)
+
+Retrieve multiple values that will be emitted by the computation as an XLA tuple.
+"""
+begin
+ begin
+ function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "OutfeedDequeueTuple") do
+ desc = tf.NodeDescription("OutfeedDequeueTuple")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function outfeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("OutfeedDequeueTuple")
+ begin
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+ else
+ outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
+
+"""
+ div(x, y)
+
+
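+# Examples
+
+A minimal eager-mode sketch (qualifying as `Ops.div` also avoids clashing
+with `Base.div`):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+x = constant([6.0, 7.0])
+y = constant([3.0, 2.0])
+Ops.div(x, y)  # elementwise division, expected: [2.0, 3.5]
+```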
+"""
+begin
+ begin
+ function div_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Div") do
+ desc = tf.NodeDescription("Div")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function div_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Div")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(div, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ div_eager(x_, y_; name=name)
+ else
+ div_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ barrier(; shapes=Int64[], capacity=-1, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "Barrier") do
+ desc = tf.NodeDescription("Barrier")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function barrier_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("Barrier")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ truncate_div(x, y)
+
+
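+# Examples
+
+A minimal eager-mode sketch; on integers the quotient rounds toward zero
+rather than flooring:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+x = constant([7, -7])
+y = constant(2)
+Ops.truncate_div(x, y)  # expected: [3, -3] (truncation, not flooring)
+```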
+"""
+begin
+ begin
+ function truncate_div_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TruncateDiv") do
+ desc = tf.NodeDescription("TruncateDiv")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function truncate_div_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("TruncateDiv")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(truncate_div, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_div(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ truncate_div_eager(x_, y_; name=name)
+ else
+ truncate_div_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ unicode_encode(input_values, input_splits; errors="replace", replacement_char=65533)
+
+
+"""
+begin
+ begin
+ function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing)
+ local desc
+ tf.with_op_name(name, "UnicodeEncode") do
+ desc = tf.NodeDescription("UnicodeEncode")
+ begin
+ begin
+ input_values_ = convert(Tensor{Int32}, input_values_)
+ begin
+ end
+ end
+ begin
+ input_splits_ = convert(Tensor{Int64}, input_splits_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_splits_)
+ end
+ end
+ begin
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if output_encoding !== nothing
+ desc["output_encoding"] = Base.String(output_encoding)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing)
+ desc = tf.EagerOp("UnicodeEncode")
+ input_values_ = convert(tf.EagerTensor, input_values_)
+ input_splits_ = convert(tf.EagerTensor, input_splits_)
+ begin
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_splits_)
+ end
+ end
+ begin
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if output_encoding !== nothing
+ desc["output_encoding"] = Base.String(output_encoding)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing)
+ if tf.in_eager_mode()
+ unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char)
+ else
+ unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char)
+ end
+ end
+ end
+end
+
+
+"""
+ merge_summary(inputs)
+
+
+"""
+begin
+ begin
+ function merge_summary_graph(inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "MergeSummary") do
+ desc = tf.NodeDescription("MergeSummary")
+ begin
+ begin
+ inputs_ = [convert(Tensor{String}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function merge_summary_eager(inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("MergeSummary")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_summary(inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ merge_summary_eager(inputs_; name=name, N=N)
+ else
+ merge_summary_graph(inputs_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ fake_queue(resource)
+
+
+"""
+begin
+ begin
+ function fake_queue_graph(resource_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQueue") do
+ desc = tf.NodeDescription("FakeQueue")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fake_queue_eager(resource_; name=nothing)
+ desc = tf.EagerOp("FakeQueue")
+ resource_ = convert(tf.EagerTensor, resource_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_queue, [resource_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_queue(resource_; name=nothing)
+ if tf.in_eager_mode()
+ fake_queue_eager(resource_; name=name)
+ else
+ fake_queue_graph(resource_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_cholesky(input)
+
+
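+# Examples
+
+A minimal eager-mode sketch on a symmetric positive-definite matrix:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+a = constant([4.0 2.0; 2.0 3.0])
+# Returns a triangular Cholesky factor of `a` (one factor per matrix
+# when the input carries batch dimensions):
+Ops.batch_cholesky(a)
+```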
+"""
+begin
+ begin
+ function batch_cholesky_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchCholesky") do
+ desc = tf.NodeDescription("BatchCholesky")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_cholesky_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchCholesky")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_cholesky, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_cholesky_eager(input_; name=name)
+ else
+ batch_cholesky_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ iterator()
+
+
+"""
+begin
+ begin
+ function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "Iterator") do
+ desc = tf.NodeDescription("Iterator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("Iterator")
+ begin
+ end
+ begin
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ bessel_i1e(x)
+
+
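+# Examples
+
+A minimal eager-mode sketch:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+x = constant([0.0, 1.0, 2.0])
+# Exponentially scaled modified Bessel function of order 1: exp(-|x|) * I1(x).
+Ops.bessel_i1e(x)  # first entry is 0.0 since I1(0) == 0
+```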
+"""
+begin
+ begin
+ function bessel_i1e_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BesselI1e") do
+ desc = tf.NodeDescription("BesselI1e")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bessel_i1e_eager(x_; name=nothing)
+ desc = tf.EagerOp("BesselI1e")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bessel_i1e, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i1e(x_; name=nothing)
+ if tf.in_eager_mode()
+ bessel_i1e_eager(x_; name=name)
+ else
+ bessel_i1e_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ import_event(writer, event)
+
+
+"""
+begin
+ begin
+ function import_event_graph(writer_, event_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ImportEvent") do
+ desc = tf.NodeDescription("ImportEvent")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ event_ = convert(Tensor{String}, event_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, event_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function import_event_eager(writer_, event_; name=nothing)
+ desc = tf.EagerOp("ImportEvent")
+ writer_ = convert(tf.EagerTensor, writer_)
+ event_ = convert(tf.EagerTensor, event_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, event_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(import_event, [writer_, event_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function import_event(writer_, event_; name=nothing)
+ if tf.in_eager_mode()
+ import_event_eager(writer_, event_; name=name)
+ else
+ import_event_graph(writer_, event_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_instance_norm(x, x_min, x_max; output_range_given=false, given_y_min=?, given_y_max=?, variance_epsilon=?, min_separation=?)
+
+
+"""
+begin
+ begin
+ function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedInstanceNorm") do
+ desc = tf.NodeDescription("QuantizedInstanceNorm")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ x_min_ = convert(Tensor{Float32}, x_min_)
+ begin
+ end
+ end
+ begin
+ x_max_ = convert(Tensor{Float32}, x_max_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, x_min_)
+ end
+ begin
+ tf.add_input(desc, x_max_)
+ end
+ end
+ begin
+ begin
+ if output_range_given !== nothing
+ desc["output_range_given"] = Base.Bool(output_range_given)
+ end
+ end
+ begin
+ if given_y_min !== nothing
+ desc["given_y_min"] = Base.identity(given_y_min)
+ end
+ end
+ begin
+ if given_y_max !== nothing
+ desc["given_y_max"] = Base.identity(given_y_max)
+ end
+ end
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if min_separation !== nothing
+ desc["min_separation"] = Base.identity(min_separation)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing)
+ desc = tf.EagerOp("QuantizedInstanceNorm")
+ x_ = convert(tf.EagerTensor, x_)
+ x_min_ = convert(tf.EagerTensor, x_min_)
+ x_max_ = convert(tf.EagerTensor, x_max_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, x_min_)
+ end
+ begin
+ tf.add_input(desc, x_max_)
+ end
+ end
+ begin
+ begin
+ if output_range_given !== nothing
+ desc["output_range_given"] = Base.Bool(output_range_given)
+ end
+ end
+ begin
+ if given_y_min !== nothing
+ desc["given_y_min"] = Base.identity(given_y_min)
+ end
+ end
+ begin
+ if given_y_max !== nothing
+ desc["given_y_max"] = Base.identity(given_y_max)
+ end
+ end
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if min_separation !== nothing
+ desc["min_separation"] = Base.identity(min_separation)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing)
+ if tf.in_eager_mode()
+ quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation)
+ else
+ quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_write_v3(handle, index, value, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayWriteV3") do
+ desc = tf.NodeDescription("TensorArrayWriteV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayWriteV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name)
+ else
+ tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_adagrad_parameters(parameters, accumulators; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ dense_to_dense_set_operation(set1, set2; validate_indices=true)
+
+
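+# Examples
+
+A minimal eager-mode sketch; the last dimension of each input holds the set
+elements, and the result comes back in sparse form (indices, values, shape):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+set1 = constant([1 2 3])
+set2 = constant([2 3 4])
+indices, values, shape = Ops.dense_to_dense_set_operation(
+    set1, set2, set_operation="intersection")
+# `values` is expected to contain [2, 3]
+```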
+"""
+begin
+ begin
+ function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ local desc
+ tf.with_op_name(name, "DenseToDenseSetOperation") do
+ desc = tf.NodeDescription("DenseToDenseSetOperation")
+ begin
+ begin
+ set1_ = convert(Tensor{Any}, set1_)
+ begin
+ end
+ end
+ begin
+ set2_ = convert(Tensor{Any}, set2_)
+ begin
+ end
+ end
+ begin
+ (set1_, set2_) = tf.tf_promote(set1_, set2_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, set1_)
+ end
+ begin
+ tf.add_input(desc, set2_)
+ end
+ end
+ begin
+ begin
+ if set_operation !== nothing
+ desc["set_operation"] = Base.String(set_operation)
+ end
+ end
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ desc = tf.EagerOp("DenseToDenseSetOperation")
+ set1_ = convert(tf.EagerTensor, set1_)
+ set2_ = convert(tf.EagerTensor, set2_)
+ begin
+ begin
+ tf.add_input(desc, set1_)
+ end
+ begin
+ tf.add_input(desc, set2_)
+ end
+ end
+ begin
+ begin
+ if set_operation !== nothing
+ desc["set_operation"] = Base.String(set_operation)
+ end
+ end
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(set1_)
+ end
+ begin
+ desc["T"] = tf.data_type(set2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing)
+ if tf.in_eager_mode()
+ dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices)
+ else
+ dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices)
+ end
+ end
+ end
+end
+
+
+"""
+ encode_jpeg(image; format="", quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit="in", x_density=300, y_density=300, xmp_metadata="")
+
+
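+# Examples
+
+A minimal eager-mode sketch; the height-width-channels uint8 layout is an
+assumption carried over from the underlying TF op:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+img = constant(rand(UInt8, 8, 8, 3))  # an 8×8 RGB image
+# Returns a scalar string tensor holding the JPEG-encoded bytes:
+Ops.encode_jpeg(img, quality=90)
+```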
+"""
+begin
+ begin
+ function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
+ local desc
+ tf.with_op_name(name, "EncodeJpeg") do
+ desc = tf.NodeDescription("EncodeJpeg")
+ begin
+ begin
+ image_ = convert(Tensor{UInt8}, image_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ end
+ begin
+ begin
+ if format !== nothing
+ desc["format"] = Base.String(format)
+ end
+ end
+ begin
+ if quality !== nothing
+ desc["quality"] = Base.Int(quality)
+ end
+ end
+ begin
+ if progressive !== nothing
+ desc["progressive"] = Base.Bool(progressive)
+ end
+ end
+ begin
+ if optimize_size !== nothing
+ desc["optimize_size"] = Base.Bool(optimize_size)
+ end
+ end
+ begin
+ if chroma_downsampling !== nothing
+ desc["chroma_downsampling"] = Base.Bool(chroma_downsampling)
+ end
+ end
+ begin
+ if density_unit !== nothing
+ desc["density_unit"] = Base.String(density_unit)
+ end
+ end
+ begin
+ if x_density !== nothing
+ desc["x_density"] = Base.Int(x_density)
+ end
+ end
+ begin
+ if y_density !== nothing
+ desc["y_density"] = Base.Int(y_density)
+ end
+ end
+ begin
+ if xmp_metadata !== nothing
+ desc["xmp_metadata"] = Base.String(xmp_metadata)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
+ desc = tf.EagerOp("EncodeJpeg")
+ image_ = convert(tf.EagerTensor, image_)
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ end
+ begin
+ begin
+ if format !== nothing
+ desc["format"] = Base.String(format)
+ end
+ end
+ begin
+ if quality !== nothing
+ desc["quality"] = Base.Int(quality)
+ end
+ end
+ begin
+ if progressive !== nothing
+ desc["progressive"] = Base.Bool(progressive)
+ end
+ end
+ begin
+ if optimize_size !== nothing
+ desc["optimize_size"] = Base.Bool(optimize_size)
+ end
+ end
+ begin
+ if chroma_downsampling !== nothing
+ desc["chroma_downsampling"] = Base.Bool(chroma_downsampling)
+ end
+ end
+ begin
+ if density_unit !== nothing
+ desc["density_unit"] = Base.String(density_unit)
+ end
+ end
+ begin
+ if x_density !== nothing
+ desc["x_density"] = Base.Int(x_density)
+ end
+ end
+ begin
+ if y_density !== nothing
+ desc["y_density"] = Base.Int(y_density)
+ end
+ end
+ begin
+ if xmp_metadata !== nothing
+ desc["xmp_metadata"] = Base.String(xmp_metadata)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
+ if tf.in_eager_mode()
+ encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata)
+ else
+ encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata)
+ end
+ end
+ end
+end
+
+
+"""
+ fused_pad_conv2d(input, paddings, filter)
+
+
+"""
+begin
+ begin
+ function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "FusedPadConv2D") do
+ desc = tf.NodeDescription("FusedPadConv2D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_) = tf.tf_promote(input_, filter_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("FusedPadConv2D")
+ input_ = convert(tf.EagerTensor, input_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding)
+ else
+ fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ inplace_update(x, i, v)
+
+
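+# Examples
+
+A minimal eager-mode sketch. Unlike some index arguments elsewhere in this
+file, `i` receives no 1-based adjustment here, so TF's 0-based row indexing
+is assumed to apply:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+x = constant(zeros(Float32, 3, 2))
+i = constant(Int32[0])               # first row, 0-based
+v = constant(ones(Float32, 1, 2))
+# Returns `x` with the selected rows replaced by `v`:
+Ops.inplace_update(x, i, v)
+```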
+"""
+begin
+ begin
+ function inplace_update_graph(x_, i_, v_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InplaceUpdate") do
+ desc = tf.NodeDescription("InplaceUpdate")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ i_ = convert(Tensor{Int32}, i_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ (x_, v_) = tf.tf_promote(x_, v_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, i_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function inplace_update_eager(x_, i_, v_; name=nothing)
+ desc = tf.EagerOp("InplaceUpdate")
+ x_ = convert(tf.EagerTensor, x_)
+ i_ = convert(tf.EagerTensor, i_)
+ v_ = convert(tf.EagerTensor, v_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, i_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_update(x_, i_, v_; name=nothing)
+ if tf.in_eager_mode()
+ inplace_update_eager(x_, i_, v_; name=name)
+ else
+ inplace_update_graph(x_, i_, v_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_relu(features, min_features, max_features; out_type=Float32)
+
+
+"""
+begin
+ begin
+ function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedRelu") do
+ desc = tf.NodeDescription("QuantizedRelu")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ min_features_ = convert(Tensor{Float32}, min_features_)
+ begin
+ end
+ end
+ begin
+ max_features_ = convert(Tensor{Float32}, max_features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, min_features_)
+ end
+ begin
+ tf.add_input(desc, max_features_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("QuantizedRelu")
+ features_ = convert(tf.EagerTensor, features_)
+ min_features_ = convert(tf.EagerTensor, min_features_)
+ max_features_ = convert(tf.EagerTensor, max_features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, min_features_)
+ end
+ begin
+ tf.add_input(desc, max_features_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type)
+ else
+ quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ gather_nd(params, indices)
+
+
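+# Examples
+
+A minimal eager-mode sketch. The wrapper subtracts 1 from `indices`, so they
+are written 1-based, Julia-style:
+
+```julia
+using TensorFlow
+enable_eager_execution()
+
+params = constant([10.0 20.0; 30.0 40.0])
+inds = constant([1 1; 2 2])  # gather the elements at (1,1) and (2,2)
+Ops.gather_nd(params, inds)  # expected: [10.0, 40.0]
+```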
+"""
+begin
+ begin
+ function gather_nd_graph(params_, indices_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GatherNd") do
+ desc = tf.NodeDescription("GatherNd")
+ begin
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (params_,) = tf.tf_promote(params_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function gather_nd_eager(params_, indices_; name=nothing)
+ desc = tf.EagerOp("GatherNd")
+ params_ = convert(tf.EagerTensor, params_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tparams"] = tf.data_type(params_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_nd(params_, indices_; name=nothing)
+ if tf.in_eager_mode()
+ gather_nd_eager(params_, indices_; name=name)
+ else
+ gather_nd_graph(params_, indices_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    placeholder(; shape=?)
+
+Inserts a placeholder for a tensor that must be fed a value when the graph is run.
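+A sketch using the higher-level `placeholder(dtype; shape=...)` helper exported by TensorFlow.jl (illustrative values; that helper ultimately builds this op):
+
+```julia
+using TensorFlow
+sess = Session(Graph())
+x = placeholder(Float64, shape=[3])
+y = 2 .* x
+run(sess, y, Dict(x => [1.0, 2.0, 3.0]))  # => [2.0, 4.0, 6.0]
+```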
+"""
+begin
+ begin
+ function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "Placeholder") do
+ desc = tf.NodeDescription("Placeholder")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function placeholder_eager(; name=nothing, dtype=nothing, shape=nothing)
+ desc = tf.EagerOp("Placeholder")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(placeholder, [], name=nothing, dtype=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder(; name=nothing, dtype=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ placeholder_eager(; name=name, dtype=dtype, shape=shape)
+ else
+ placeholder_graph(; name=name, dtype=dtype, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+    filter_by_last_component_dataset(input_dataset)
+
+Creates a dataset containing the elements of the first component of `input_dataset`, keeping only those whose last component is `true`.
+"""
+begin
+ begin
+ function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "FilterByLastComponentDataset") do
+ desc = tf.NodeDescription("FilterByLastComponentDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("FilterByLastComponentDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    clip_by_value(t, clip_value_min, clip_value_max)
+
+Clips the values of `t` elementwise to the closed interval [`clip_value_min`, `clip_value_max`].
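+A minimal sketch (illustrative values, not from the source):
+
+```julia
+using TensorFlow
+x = constant([1.0, 5.0, 10.0])
+clip_by_value(x, 2.0, 8.0)  # => [2.0, 5.0, 8.0] when evaluated
+```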
+"""
+begin
+ begin
+ function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ClipByValue") do
+ desc = tf.NodeDescription("ClipByValue")
+ begin
+ begin
+ t_ = convert(Tensor{Any}, t_)
+ begin
+ end
+ end
+ begin
+ clip_value_min_ = convert(Tensor{Any}, clip_value_min_)
+ begin
+ end
+ end
+ begin
+ clip_value_max_ = convert(Tensor{Any}, clip_value_max_)
+ begin
+ end
+ end
+ begin
+ (t_, clip_value_min_, clip_value_max_) = tf.tf_promote(t_, clip_value_min_, clip_value_max_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, clip_value_min_)
+ end
+ begin
+ tf.add_input(desc, clip_value_max_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing)
+ desc = tf.EagerOp("ClipByValue")
+ t_ = convert(tf.EagerTensor, t_)
+ clip_value_min_ = convert(tf.EagerTensor, clip_value_min_)
+ clip_value_max_ = convert(tf.EagerTensor, clip_value_max_)
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, clip_value_min_)
+ end
+ begin
+ tf.add_input(desc, clip_value_max_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(t_)
+ end
+ begin
+ desc["T"] = tf.data_type(clip_value_min_)
+ end
+ begin
+ desc["T"] = tf.data_type(clip_value_max_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing)
+ if tf.in_eager_mode()
+ clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name)
+ else
+ clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    image_summary(tag, tensor; max_images=3, bad_color=?)
+
+Outputs a `Summary` protocol buffer containing images built from `tensor`, which must be 4-D with shape `[batch_size, height, width, channels]`.
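+A graph-mode sketch (tag and sizes are illustrative):
+
+```julia
+using TensorFlow
+imgs = constant(rand(Float32, 2, 8, 8, 1))  # batch × height × width × channels
+image_summary(constant("inputs"), imgs, max_images=2)
+```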
+"""
+begin
+ begin
+ function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
+ local desc
+ tf.with_op_name(name, "ImageSummary") do
+ desc = tf.NodeDescription("ImageSummary")
+ begin
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Float32}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if max_images !== nothing
+ desc["max_images"] = Base.Int(max_images)
+ end
+ end
+ begin
+ if bad_color !== nothing
+ desc["bad_color"] = TensorFlow.RawTensor(bad_color)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
+ desc = tf.EagerOp("ImageSummary")
+ tag_ = convert(tf.EagerTensor, tag_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if max_images !== nothing
+ desc["max_images"] = Base.Int(max_images)
+ end
+ end
+ begin
+ if bad_color !== nothing
+ desc["bad_color"] = TensorFlow.RawTensor(bad_color)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
+ if tf.in_eager_mode()
+ image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color)
+ else
+ image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color)
+ end
+ end
+ end
+end
+
+
+"""
+ retrieve_tpu_embedding_adadelta_parameters(; table_id=-1, table_name=)
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adadelta_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+    string_join(inputs; separator=)
+
+Joins the strings in the given list of string tensors into one tensor, inserting `separator` between the elements.
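+A graph-mode sketch (illustrative values):
+
+```julia
+using TensorFlow
+a, b = constant("foo"), constant("bar")
+string_join([a, b], separator="-")  # => "foo-bar" when run in a Session
+```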
+"""
+begin
+ begin
+ function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing)
+ local desc
+ tf.with_op_name(name, "StringJoin") do
+ desc = tf.NodeDescription("StringJoin")
+ begin
+ begin
+ inputs_ = [convert(Tensor{String}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if separator !== nothing
+ desc["separator"] = Base.String(separator)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing)
+ desc = tf.EagerOp("StringJoin")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if separator !== nothing
+ desc["separator"] = Base.String(separator)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_join(inputs_; name=nothing, N=nothing, separator=nothing)
+ if tf.in_eager_mode()
+ string_join_eager(inputs_; name=name, N=N, separator=separator)
+ else
+ string_join_graph(inputs_; name=name, N=N, separator=separator)
+ end
+ end
+ end
+end
+
+
+"""
+    resource_scatter_nd_add(ref, indices, updates; use_locking=true)
+
+Applies sparse addition of `updates` to individual values or slices of the resource variable referenced by `ref`, at the positions given by `indices`.
+"""
+begin
+ begin
+ function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterNdAdd") do
+ desc = tf.NodeDescription("ResourceScatterNdAdd")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceScatterNdAdd")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle, bucket_boundaries)
+
+Deserializes the given bucket boundaries into the quantile stream resource.
+"""
+begin
+ begin
+ function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do
+ desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize")
+ begin
+ begin
+ quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_)
+ begin
+ end
+ end
+ begin
+ bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, bucket_boundaries_)
+ end
+ end
+ begin
+ begin
+ if num_streams !== nothing
+ desc["num_streams"] = Base.Int(num_streams)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing)
+ desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize")
+ quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+ bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_)
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, bucket_boundaries_)
+ end
+ end
+ begin
+ begin
+ if num_streams !== nothing
+ desc["num_streams"] = Base.Int(num_streams)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams)
+ else
+ boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams)
+ end
+ end
+ end
+end
+
+
+"""
+    left_shift(x, y)
+
+Computes the elementwise bitwise left-shift of `x` by `y`.
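+A minimal sketch (illustrative values):
+
+```julia
+using TensorFlow
+left_shift(constant(Int32[1, 2, 4]), constant(Int32[1, 2, 3]))  # => Int32[2, 8, 32]
+```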
+"""
+begin
+ begin
+ function left_shift_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LeftShift") do
+ desc = tf.NodeDescription("LeftShift")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function left_shift_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("LeftShift")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(left_shift, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function left_shift(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ left_shift_eager(x_, y_; name=name)
+ else
+ left_shift_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    tensor_scatter_add(tensor, indices, updates)
+
+Adds sparse `updates` to `tensor` at the positions given by `indices`, returning a new tensor; the input tensor is left unmodified.
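+A graph-mode sketch (illustrative; the graph wrapper shifts the 1-based `indices` to TensorFlow's 0-based convention):
+
+```julia
+using TensorFlow
+t = constant(zeros(4))
+idx = constant(reshape([1, 3], 2, 1))  # 1-based positions of the rows to update
+tensor_scatter_add(t, idx, constant([10.0, 20.0]))  # => [10.0, 0.0, 20.0, 0.0]
+```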
+"""
+begin
+ begin
+ function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorScatterAdd") do
+ desc = tf.NodeDescription("TensorScatterAdd")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (tensor_, updates_) = tf.tf_promote(tensor_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing)
+ desc = tf.EagerOp("TensorScatterAdd")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_add(tensor_, indices_, updates_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_scatter_add_eager(tensor_, indices_, updates_; name=name)
+ else
+ tensor_scatter_add_graph(tensor_, indices_, updates_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    _var_handles_op()
+
+(Internal TensorFlow op.) Creates handles to a batch of resource variables described by `containers`, `shared_names`, `dtypes`, and `shapes`.
+"""
+begin
+ begin
+ function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing)
+ local desc
+ tf.with_op_name(name, "_VarHandlesOp") do
+ desc = tf.NodeDescription("_VarHandlesOp")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if containers !== nothing
+ desc["containers"] = map(Base.identity, containers)
+ end
+ end
+ begin
+ if shared_names !== nothing
+ desc["shared_names"] = map(Base.identity, shared_names)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:N
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _var_handles_op_eager(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing)
+ desc = tf.EagerOp("_VarHandlesOp")
+ begin
+ end
+ begin
+ begin
+ if containers !== nothing
+ desc["containers"] = map(Base.identity, containers)
+ end
+ end
+ begin
+ if shared_names !== nothing
+ desc["shared_names"] = map(Base.identity, shared_names)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing)
+ if tf.in_eager_mode()
+ _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes)
+ else
+ _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    ifft3d(input)
+
+Computes the inverse 3-dimensional discrete Fourier transform over the three innermost dimensions of `input`.
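+A minimal sketch (illustrative; assumes a complex-valued input tensor):
+
+```julia
+using TensorFlow
+x = constant(randn(Complex{Float32}, 4, 4, 4))
+ifft3d(x)
+```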
+"""
+begin
+ begin
+ function ifft3d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IFFT3D") do
+ desc = tf.NodeDescription("IFFT3D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ifft3d_eager(input_; name=nothing)
+ desc = tf.EagerOp("IFFT3D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tcomplex"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ifft3d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft3d(input_; name=nothing)
+ if tf.in_eager_mode()
+ ifft3d_eager(input_; name=name)
+ else
+ ifft3d_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    ref_select(index, inputs)
+
+Forwards the element of `inputs` selected by `index` to the output.
+"""
+begin
+ begin
+ function ref_select_graph(index_, inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "RefSelect") do
+ desc = tf.NodeDescription("RefSelect")
+ begin
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ (inputs_,) = tf.tf_promote(inputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ref_select_eager(index_, inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("RefSelect")
+ index_ = convert(tf.EagerTensor, index_)
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(inputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_select(index_, inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ ref_select_eager(index_, inputs_; name=name, N=N)
+ else
+ ref_select_graph(index_, inputs_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+    sparse_tensor_slice_dataset(indices, values, dense_shape)
+
+Creates a dataset that splits the `SparseTensor` described by `indices`, `values`, and `dense_shape` into elements row-wise.
+"""
+begin
+ begin
+ function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseTensorSliceDataset") do
+ desc = tf.NodeDescription("SparseTensorSliceDataset")
+ begin
+ begin
+ indices_ = convert(Tensor{Int64}, indices_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ dense_shape_ = convert(Tensor{Int64}, dense_shape_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, dense_shape_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing)
+ desc = tf.EagerOp("SparseTensorSliceDataset")
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ dense_shape_ = convert(tf.EagerTensor, dense_shape_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, dense_shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tvalues"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name)
+ else
+ sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; table_id=-1, table_name=)
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+    batch_ifft2d(input)
+
+Computes the inverse 2-dimensional discrete Fourier transform over the two innermost dimensions of each batch element (a deprecated alias of `IFFT2D`).
+"""
+begin
+ begin
+ function batch_ifft2d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchIFFT2D") do
+ desc = tf.NodeDescription("BatchIFFT2D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_ifft2d_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchIFFT2D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_ifft2d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft2d(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_ifft2d_eager(input_; name=name)
+ else
+ batch_ifft2d_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    tensor_array_gather(handle, indices, flow_in; element_shape=?)
+
+Gathers the elements of a `TensorArray` selected by `indices` into a single output tensor; all selected elements must have the same shape.
+"""
+begin
+ begin
+ function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGather") do
+ desc = tf.NodeDescription("TensorArrayGather")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ desc = tf.EagerOp("TensorArrayGather")
+ handle_ = convert(tf.EagerTensor, handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_gather, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ if tf.in_eager_mode()
+ tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ else
+ tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ end
+ end
+ end
+end
+
+
+"""
+    sparse_segment_mean_with_num_segments(data, indices, segment_ids, num_segments)
+
+Computes the mean along sparse segments of `data`; segment ids that never appear in `segment_ids` produce rows of zeros in the output, which has `num_segments` rows.
+"""
+begin
+ begin
+ function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do
+ desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentMeanWithNumSegments")
+ data_ = convert(tf.EagerTensor, data_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name)
+ else
+ sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    ensure_shape(input)
+
+Checks at run time that the shape of `input` matches the `shape` attribute, raising an error on mismatch and otherwise passing the tensor through unchanged.
+"""
+begin
+ begin
+ function ensure_shape_graph(input_; name=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "EnsureShape") do
+ desc = tf.NodeDescription("EnsureShape")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ensure_shape_eager(input_; name=nothing, shape=nothing)
+ desc = tf.EagerOp("EnsureShape")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ensure_shape(input_; name=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ ensure_shape_eager(input_; name=name, shape=shape)
+ else
+ ensure_shape_graph(input_; name=name, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+    apply_proximal_gradient_descent(var, alpha, l1, l2, delta; use_locking=false)
+
+Updates `var` using the FOBOS algorithm with a fixed learning rate `alpha` and proximal terms `l1` and `l2`.
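+The update rule, per the TensorFlow op documentation, written as plain Julia over arrays for reference:
+
+```julia
+prox = var .- alpha .* delta
+var  = sign.(prox) ./ (1 + alpha * l2) .* max.(abs.(prox) .- alpha * l1, 0)
+```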
+"""
+begin
+ begin
+ function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyProximalGradientDescent") do
+ desc = tf.NodeDescription("ApplyProximalGradientDescent")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Any}, delta_)
+ begin
+ end
+ end
+ begin
+ (var_, alpha_, l1_, l2_, delta_) = tf.tf_promote(var_, alpha_, l1_, l2_, delta_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyProximalGradientDescent")
+ var_ = convert(tf.EagerTensor, var_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(delta_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking)
+ else
+ apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    collective_reduce(input)
+
+Mutually reduces tensors of identical type and shape across the devices in a collective group, combining them with `merge_op` and post-processing with `final_op`.
+"""
+begin
+ begin
+ function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing)
+ local desc
+ tf.with_op_name(name, "CollectiveReduce") do
+ desc = tf.NodeDescription("CollectiveReduce")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if group_size !== nothing
+ desc["group_size"] = Base.Int(group_size)
+ end
+ end
+ begin
+ if group_key !== nothing
+ desc["group_key"] = Base.Int(group_key)
+ end
+ end
+ begin
+ if instance_key !== nothing
+ desc["instance_key"] = Base.Int(instance_key)
+ end
+ end
+ begin
+ if merge_op !== nothing
+ desc["merge_op"] = Base.String(merge_op)
+ end
+ end
+ begin
+ if final_op !== nothing
+ desc["final_op"] = Base.String(final_op)
+ end
+ end
+ begin
+ if subdiv_offsets !== nothing
+ desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing)
+ desc = tf.EagerOp("CollectiveReduce")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if group_size !== nothing
+ desc["group_size"] = Base.Int(group_size)
+ end
+ end
+ begin
+ if group_key !== nothing
+ desc["group_key"] = Base.Int(group_key)
+ end
+ end
+ begin
+ if instance_key !== nothing
+ desc["instance_key"] = Base.Int(instance_key)
+ end
+ end
+ begin
+ if merge_op !== nothing
+ desc["merge_op"] = Base.String(merge_op)
+ end
+ end
+ begin
+ if final_op !== nothing
+ desc["final_op"] = Base.String(final_op)
+ end
+ end
+ begin
+ if subdiv_offsets !== nothing
+ desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing)
+ if tf.in_eager_mode()
+ collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets)
+ else
+ collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets)
+ end
+ end
+ end
+end
+
+
+"""
+    is_nan(x)
+
+Returns a boolean tensor indicating which elements of `x` are NaN.
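+A minimal sketch (illustrative values):
+
+```julia
+using TensorFlow
+is_nan(constant([1.0, NaN, Inf]))  # => [false, true, false] when evaluated
+```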
+"""
+begin
+ begin
+ function is_nan_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IsNan") do
+ desc = tf.NodeDescription("IsNan")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function is_nan_eager(x_; name=nothing)
+ desc = tf.EagerOp("IsNan")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(is_nan, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_nan(x_; name=nothing)
+ if tf.in_eager_mode()
+ is_nan_eager(x_; name=name)
+ else
+ is_nan_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad; use_locking=false)
+
+Updates `var` according to the AdaMax variant of the Adam algorithm.
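+The update rule, per the TensorFlow op documentation, written as plain Julia over arrays for reference:
+
+```julia
+m   = beta1 .* m .+ (1 - beta1) .* grad
+v   = max.(beta2 .* v, abs.(grad))
+var = var .- (lr / (1 - beta1_power)) .* m ./ (v .+ epsilon)
+```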
+"""
+begin
+ begin
+ function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyAdaMax") do
+ desc = tf.NodeDescription("ApplyAdaMax")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ beta1_power_ = convert(Tensor{Any}, beta1_power_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ beta1_ = convert(Tensor{Any}, beta1_)
+ begin
+ end
+ end
+ begin
+ beta2_ = convert(Tensor{Any}, beta2_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyAdaMax")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ beta1_power_ = convert(tf.EagerTensor, beta1_power_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ beta1_ = convert(tf.EagerTensor, beta1_)
+ beta2_ = convert(tf.EagerTensor, beta2_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(m_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    decode_and_crop_jpeg(contents, crop_window; channels=0, ratio=1, fancy_upscaling=true, try_recover_truncated=false, acceptable_fraction=?, dct_method=)
+
+Decodes a JPEG-encoded image and crops it to the window `[crop_y, crop_x, crop_height, crop_width]`.
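+A sketch (the file name is hypothetical; the crop window is passed through to TensorFlow unadjusted, so its offsets follow TensorFlow's 0-based convention):
+
+```julia
+using TensorFlow
+contents = constant(read("photo.jpg", String))
+window = constant(Int32[0, 0, 100, 100])  # y, x, height, width
+decode_and_crop_jpeg(contents, window, channels=3)
+```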
+"""
+begin
+ begin
+ function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeAndCropJpeg") do
+ desc = tf.NodeDescription("DecodeAndCropJpeg")
+ begin
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ begin
+ crop_window_ = convert(Tensor{Int32}, crop_window_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ begin
+ tf.add_input(desc, crop_window_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ begin
+ if ratio !== nothing
+ desc["ratio"] = Base.Int(ratio)
+ end
+ end
+ begin
+ if fancy_upscaling !== nothing
+ desc["fancy_upscaling"] = Base.Bool(fancy_upscaling)
+ end
+ end
+ begin
+ if try_recover_truncated !== nothing
+ desc["try_recover_truncated"] = Base.Bool(try_recover_truncated)
+ end
+ end
+ begin
+ if acceptable_fraction !== nothing
+ desc["acceptable_fraction"] = Base.identity(acceptable_fraction)
+ end
+ end
+ begin
+ if dct_method !== nothing
+ desc["dct_method"] = Base.String(dct_method)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+ desc = tf.EagerOp("DecodeAndCropJpeg")
+ contents_ = convert(tf.EagerTensor, contents_)
+ crop_window_ = convert(tf.EagerTensor, crop_window_)
+ begin
+ begin
+ tf.add_input(desc, contents_)
+ end
+ begin
+ tf.add_input(desc, crop_window_)
+ end
+ end
+ begin
+ begin
+ if channels !== nothing
+ desc["channels"] = Base.Int(channels)
+ end
+ end
+ begin
+ if ratio !== nothing
+ desc["ratio"] = Base.Int(ratio)
+ end
+ end
+ begin
+ if fancy_upscaling !== nothing
+ desc["fancy_upscaling"] = Base.Bool(fancy_upscaling)
+ end
+ end
+ begin
+ if try_recover_truncated !== nothing
+ desc["try_recover_truncated"] = Base.Bool(try_recover_truncated)
+ end
+ end
+ begin
+ if acceptable_fraction !== nothing
+ desc["acceptable_fraction"] = Base.identity(acceptable_fraction)
+ end
+ end
+ begin
+ if dct_method !== nothing
+ desc["dct_method"] = Base.String(dct_method)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+ if tf.in_eager_mode()
+ decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
+ else
+ decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
+ end
+ end
+ end
+end
+
+
+"""
+    apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false)
+
+Updates `var` according to the centered RMSProp algorithm, which normalizes by an estimate of the gradient's variance rather than its uncentered second moment.
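+The update rule, per the TensorFlow op documentation, written as plain Julia over arrays for reference:
+
+```julia
+mg  = rho .* mg .+ (1 - rho) .* grad
+ms  = rho .* ms .+ (1 - rho) .* grad .^ 2
+mom = momentum .* mom .+ lr .* grad ./ sqrt.(ms .- mg .^ 2 .+ epsilon)
+var = var .- mom
+```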
+"""
+begin
+ begin
+ function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyCenteredRMSProp") do
+ desc = tf.NodeDescription("ApplyCenteredRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ mg_ = convert(Tensor{Any}, mg_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyCenteredRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ mg_ = convert(tf.EagerTensor, mg_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(mg_)
+ end
+ begin
+ desc["T"] = tf.data_type(ms_)
+ end
+ begin
+ desc["T"] = tf.data_type(mom_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1])
+
+Computes the gradient of a 3-D convolution with respect to its filter.
+"""
+begin
+ begin
+ function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv3DBackpropFilterV2") do
+ desc = tf.NodeDescription("Conv3DBackpropFilterV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_sizes_ = convert(Tensor{Int32}, filter_sizes_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_sizes_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv3DBackpropFilterV2")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_sizes_ = convert(tf.EagerTensor, filter_sizes_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_sizes_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
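+# Note: `Conv3DBackpropFilterV2` computes the gradient of a 3-D convolution
+# with respect to its filter; `filter_sizes` is an int32 vector holding the
+# 5-D filter shape. A shape-only eager-mode sketch (names are illustrative):
+#
+#     # input: [batch, depth, height, width, in_ch], filter: [2, 2, 2, in_ch, out_ch]
+#     conv3d_backprop_filter_v2(input, [2, 2, 2, 4, 8], out_backprop;
+#                               strides=[1, 1, 1, 1, 1], padding="SAME")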
+
+
+"""
+ matrix_triangular_solve(matrix, rhs; lower=true, adjoint=false)
+
+
+"""
+begin
+ begin
+ function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixTriangularSolve") do
+ desc = tf.NodeDescription("MatrixTriangularSolve")
+ begin
+ begin
+ matrix_ = convert(Tensor{Any}, matrix_)
+ begin
+ end
+ end
+ begin
+ rhs_ = convert(Tensor{Any}, rhs_)
+ begin
+ end
+ end
+ begin
+ (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if lower !== nothing
+ desc["lower"] = Base.Bool(lower)
+ end
+ end
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
+ desc = tf.EagerOp("MatrixTriangularSolve")
+ matrix_ = convert(tf.EagerTensor, matrix_)
+ rhs_ = convert(tf.EagerTensor, rhs_)
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if lower !== nothing
+ desc["lower"] = Base.Bool(lower)
+ end
+ end
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(matrix_)
+ end
+ begin
+ desc["T"] = tf.data_type(rhs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
+ if tf.in_eager_mode()
+ matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint)
+ else
+ matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint)
+ end
+ end
+ end
+end
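+# Note: `MatrixTriangularSolve` solves `op(matrix) * output = rhs` by
+# substitution, reading only the lower triangle when `lower=true` (the upper
+# when `lower=false`) and using the conjugate transpose when `adjoint=true`.
+# A minimal eager-mode sketch:
+#
+#     m = constant([2.0 0.0; 1.0 3.0])            # lower triangular
+#     b = constant(reshape([2.0, 4.0], 2, 1))
+#     matrix_triangular_solve(m, b; lower=true)   # forward substitution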
+
+
+"""
+ reader_num_work_units_completed(reader_handle)
+
+
+"""
+begin
+ begin
+ function reader_num_work_units_completed_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do
+ desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_num_work_units_completed_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderNumWorkUnitsCompleted")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_num_work_units_completed_eager(reader_handle_; name=name)
+ else
+ reader_num_work_units_completed_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ write_audio_summary(writer, step, tag, tensor, sample_rate; max_outputs=3)
+
+
+"""
+begin
+ begin
+ function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+ local desc
+ tf.with_op_name(name, "WriteAudioSummary") do
+ desc = tf.NodeDescription("WriteAudioSummary")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Float32}, tensor_)
+ begin
+ end
+ end
+ begin
+ sample_rate_ = convert(Tensor{Float32}, sample_rate_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ begin
+ if max_outputs !== nothing
+ desc["max_outputs"] = Base.Int(max_outputs)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+ desc = tf.EagerOp("WriteAudioSummary")
+ writer_ = convert(tf.EagerTensor, writer_)
+ step_ = convert(tf.EagerTensor, step_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ sample_rate_ = convert(tf.EagerTensor, sample_rate_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ begin
+ if max_outputs !== nothing
+ desc["max_outputs"] = Base.Int(max_outputs)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+ if tf.in_eager_mode()
+ write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
+ else
+ write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
+ end
+ end
+ end
+end
+
+
+"""
+ sharded_filespec(basename, num_shards)
+
+
+"""
+begin
+ begin
+ function sharded_filespec_graph(basename_, num_shards_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ShardedFilespec") do
+ desc = tf.NodeDescription("ShardedFilespec")
+ begin
+ begin
+ basename_ = convert(Tensor{String}, basename_)
+ begin
+ end
+ end
+ begin
+ num_shards_ = convert(Tensor{Int32}, num_shards_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, basename_)
+ end
+ begin
+ tf.add_input(desc, num_shards_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sharded_filespec_eager(basename_, num_shards_; name=nothing)
+ desc = tf.EagerOp("ShardedFilespec")
+ basename_ = convert(tf.EagerTensor, basename_)
+ num_shards_ = convert(tf.EagerTensor, num_shards_)
+ begin
+ begin
+ tf.add_input(desc, basename_)
+ end
+ begin
+ tf.add_input(desc, num_shards_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filespec(basename_, num_shards_; name=nothing)
+ if tf.in_eager_mode()
+ sharded_filespec_eager(basename_, num_shards_; name=name)
+ else
+ sharded_filespec_graph(basename_, num_shards_; name=name)
+ end
+ end
+ end
+end
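+# Note: `ShardedFilespec` produces the glob-style pattern matching all shards
+# written for `basename` with `num_shards` shards, of the form
+# `basename-?????-of-NNNNN`. A minimal eager-mode sketch:
+#
+#     sharded_filespec("train", 16)  # "train-?????-of-00016"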
+
+
+"""
+ div_no_nan(x, y)
+
+
+"""
+begin
+ begin
+ function div_no_nan_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DivNoNan") do
+ desc = tf.NodeDescription("DivNoNan")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function div_no_nan_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("DivNoNan")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div_no_nan(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ div_no_nan_eager(x_, y_; name=name)
+ else
+ div_no_nan_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
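+# Note: `DivNoNan` computes `x / y` elementwise but returns 0 wherever
+# `y == 0`, avoiding Inf/NaN from zero denominators. A minimal eager-mode
+# sketch, assuming `enable_eager_execution()` has been called:
+#
+#     x = constant([1.0, 2.0, 3.0])
+#     y = constant([2.0, 0.0, 6.0])
+#     div_no_nan(x, y)  # ≈ [0.5, 0.0, 0.5]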
+
+
+"""
+ sparse_accumulator_apply_gradient(handle, local_step, gradient_indices, gradient_values, gradient_shape)
+
+
+"""
+begin
+ begin
+ function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing)
+ local desc
+ tf.with_op_name(name, "SparseAccumulatorApplyGradient") do
+ desc = tf.NodeDescription("SparseAccumulatorApplyGradient")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ local_step_ = convert(Tensor{Int64}, local_step_)
+ begin
+ end
+ end
+ begin
+ gradient_indices_ = convert(Tensor{Int64}, gradient_indices_)
+ begin
+ end
+ end
+ begin
+ gradient_values_ = convert(Tensor{Any}, gradient_values_)
+ begin
+ end
+ end
+ begin
+ gradient_shape_ = convert(Tensor{Int64}, gradient_shape_)
+ begin
+ end
+ end
+ begin
+ (gradient_values_,) = tf.tf_promote(gradient_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, local_step_)
+ end
+ begin
+ tf.add_input(desc, gradient_indices_)
+ end
+ begin
+ tf.add_input(desc, gradient_values_)
+ end
+ begin
+ tf.add_input(desc, gradient_shape_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if has_known_shape !== nothing
+ desc["has_known_shape"] = Base.Bool(has_known_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing)
+ desc = tf.EagerOp("SparseAccumulatorApplyGradient")
+ handle_ = convert(tf.EagerTensor, handle_)
+ local_step_ = convert(tf.EagerTensor, local_step_)
+ gradient_indices_ = convert(tf.EagerTensor, gradient_indices_)
+ gradient_values_ = convert(tf.EagerTensor, gradient_values_)
+ gradient_shape_ = convert(tf.EagerTensor, gradient_shape_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, local_step_)
+ end
+ begin
+ tf.add_input(desc, gradient_indices_)
+ end
+ begin
+ tf.add_input(desc, gradient_values_)
+ end
+ begin
+ tf.add_input(desc, gradient_shape_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if has_known_shape !== nothing
+ desc["has_known_shape"] = Base.Bool(has_known_shape)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(gradient_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing)
+ if tf.in_eager_mode()
+ sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape)
+ else
+ sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ ragged_tensor_to_sparse(rt_nested_splits, rt_dense_values)
+
+
+"""
+begin
+ begin
+ function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing)
+ local desc
+ tf.with_op_name(name, "RaggedTensorToSparse") do
+ desc = tf.NodeDescription("RaggedTensorToSparse")
+ begin
+ begin
+ rt_nested_splits_ = [convert(Tensor{Int64}, x) for x = rt_nested_splits_]
+ begin
+ end
+ end
+ begin
+ rt_dense_values_ = convert(Tensor{Any}, rt_dense_values_)
+ begin
+ end
+ end
+ begin
+ (rt_dense_values_,) = tf.tf_promote(rt_dense_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, rt_nested_splits_)
+ end
+ begin
+ tf.add_input(desc, rt_dense_values_)
+ end
+ end
+ begin
+ begin
+ if RAGGED_RANK !== nothing
+ desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing)
+ desc = tf.EagerOp("RaggedTensorToSparse")
+ # list-of-tensors input: convert elementwise, as in the graph wrapper above
+ rt_nested_splits_ = [convert(tf.EagerTensor, x) for x = rt_nested_splits_]
+ rt_dense_values_ = convert(tf.EagerTensor, rt_dense_values_)
+ begin
+ begin
+ tf.add_input(desc, rt_nested_splits_)
+ end
+ begin
+ tf.add_input(desc, rt_dense_values_)
+ end
+ end
+ begin
+ begin
+ if RAGGED_RANK !== nothing
+ desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(rt_dense_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing)
+ if tf.in_eager_mode()
+ ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK)
+ else
+ ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK)
+ end
+ end
+ end
+end
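+# Note: `RaggedTensorToSparse` converts a ragged tensor, given as
+# `RAGGED_RANK` nested row-split vectors plus a flat dense-values tensor,
+# into the (indices, values, dense_shape) triple of a sparse tensor; that is
+# why these wrappers return all three outputs rather than `res[1]`.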
+
+
+"""
+ extract_volume_patches(input)
+
+
+"""
+begin
+ begin
+ function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "ExtractVolumePatches") do
+ desc = tf.NodeDescription("ExtractVolumePatches")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksizes !== nothing
+ desc["ksizes"] = map(Base.identity, ksizes)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("ExtractVolumePatches")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksizes !== nothing
+ desc["ksizes"] = map(Base.identity, ksizes)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding)
+ else
+ extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ barrier_insert_many(handle, keys, values)
+
+
+"""
+begin
+ begin
+ function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing)
+ local desc
+ tf.with_op_name(name, "BarrierInsertMany") do
+ desc = tf.NodeDescription("BarrierInsertMany")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{String}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if component_index !== nothing
+ component_index = Base.Int(component_index) - 1
+ end
+ end
+ begin
+ if component_index !== nothing
+ desc["component_index"] = Base.Int(component_index)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing)
+ desc = tf.EagerOp("BarrierInsertMany")
+ handle_ = convert(tf.EagerTensor, handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if component_index !== nothing
+ component_index = Base.Int(component_index) - 1
+ end
+ end
+ begin
+ if component_index !== nothing
+ desc["component_index"] = Base.Int(component_index)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, component_index=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing)
+ if tf.in_eager_mode()
+ barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index)
+ else
+ barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index)
+ end
+ end
+ end
+end
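+# Note: the wrappers above subtract 1 from `component_index`, mapping Julia's
+# 1-based component numbering onto the 0-based index `BarrierInsertMany`
+# expects, so callers pass 1 for the first component of each barrier element.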
+
+
+"""
+ const_()
+
+
+"""
+begin
+ begin
+ function const__graph(; name=nothing, value=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "Const") do
+ desc = tf.NodeDescription("Const")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if value !== nothing
+ desc["value"] = TensorFlow.RawTensor(value)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function const__eager(; name=nothing, value=nothing, dtype=nothing)
+ desc = tf.EagerOp("Const")
+ begin
+ end
+ begin
+ begin
+ if value !== nothing
+ desc["value"] = TensorFlow.RawTensor(value)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function const_(; name=nothing, value=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ const__eager(; name=name, value=value, dtype=dtype)
+ else
+ const__graph(; name=name, value=value, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ space_to_batch(input, paddings)
+
+
+"""
+begin
+ begin
+ function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing)
+ local desc
+ tf.with_op_name(name, "SpaceToBatch") do
+ desc = tf.NodeDescription("SpaceToBatch")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (paddings_,) = tf.tf_promote(paddings_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing)
+ desc = tf.EagerOp("SpaceToBatch")
+ input_ = convert(tf.EagerTensor, input_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ begin
+ if block_size !== nothing
+ desc["block_size"] = Base.Int(block_size)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tpaddings"] = tf.data_type(paddings_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch(input_, paddings_; name=nothing, block_size=nothing)
+ if tf.in_eager_mode()
+ space_to_batch_eager(input_, paddings_; name=name, block_size=block_size)
+ else
+ space_to_batch_graph(input_, paddings_; name=name, block_size=block_size)
+ end
+ end
+ end
+end
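+# Note: `SpaceToBatch` zero-pads the spatial dimensions according to
+# `paddings` and then moves `block_size x block_size` spatial blocks into the
+# batch dimension, so a padded `[n, h, w, c]` input becomes
+# `[n * block_size^2, h / block_size, w / block_size, c]`. A shape-only
+# eager-mode sketch:
+#
+#     x = constant(randn(1, 4, 4, 1))
+#     space_to_batch(x, [0 0; 0 0]; block_size=2)  # shape (4, 2, 2, 1)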
+
+
+"""
+ stage_size(; capacity=0, memory_limit=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "StageSize") do
+ desc = tf.NodeDescription("StageSize")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stage_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("StageSize")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ empty_tensor_list(element_shape, max_num_elements)
+
+
+"""
+begin
+ begin
+ function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ local desc
+ tf.with_op_name(name, "EmptyTensorList") do
+ desc = tf.NodeDescription("EmptyTensorList")
+ begin
+ begin
+ element_shape_ = convert(Tensor{Any}, element_shape_)
+ begin
+ end
+ end
+ begin
+ max_num_elements_ = convert(Tensor{Int32}, max_num_elements_)
+ begin
+ end
+ end
+ begin
+ (element_shape_,) = tf.tf_promote(element_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ begin
+ tf.add_input(desc, max_num_elements_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ desc = tf.EagerOp("EmptyTensorList")
+ element_shape_ = convert(tf.EagerTensor, element_shape_)
+ max_num_elements_ = convert(tf.EagerTensor, max_num_elements_)
+ begin
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ begin
+ tf.add_input(desc, max_num_elements_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ begin
+ desc["shape_type"] = tf.data_type(element_shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ if tf.in_eager_mode()
+ empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ else
+ empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ end
+ end
+ end
+end
+
+
+"""
+ lu(input; output_idx_type=Int32)
+
+
+"""
+begin
+ begin
+ function lu_graph(input_; name=nothing, output_idx_type=nothing)
+ local desc
+ tf.with_op_name(name, "Lu") do
+ desc = tf.NodeDescription("Lu")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if output_idx_type !== nothing
+ desc["output_idx_type"] = Base.identity(output_idx_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function lu_eager(input_; name=nothing, output_idx_type=nothing)
+ desc = tf.EagerOp("Lu")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if output_idx_type !== nothing
+ desc["output_idx_type"] = Base.identity(output_idx_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lu, [input_], name=nothing, output_idx_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lu(input_; name=nothing, output_idx_type=nothing)
+ if tf.in_eager_mode()
+ lu_eager(input_; name=name, output_idx_type=output_idx_type)
+ else
+ lu_graph(input_; name=name, output_idx_type=output_idx_type)
+ end
+ end
+ end
+end
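+# Note: `Lu` has two outputs, which is why the graph wrapper collects both:
+# the packed factorization (unit-diagonal L strictly below the diagonal, U on
+# and above it) and the pivot permutation, whose dtype is selected by
+# `output_idx_type`. A minimal eager-mode sketch:
+#
+#     packed, p = lu(constant([4.0 3.0; 6.0 3.0]))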
+
+
+"""
+ decode_compressed(bytes; compression_type=)
+
+
+"""
+begin
+ begin
+ function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeCompressed") do
+ desc = tf.NodeDescription("DecodeCompressed")
+ begin
+ begin
+ bytes_ = convert(Tensor{String}, bytes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, bytes_)
+ end
+ end
+ begin
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing)
+ desc = tf.EagerOp("DecodeCompressed")
+ bytes_ = convert(tf.EagerTensor, bytes_)
+ begin
+ begin
+ tf.add_input(desc, bytes_)
+ end
+ end
+ begin
+ begin
+ if compression_type !== nothing
+ desc["compression_type"] = Base.String(compression_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_compressed(bytes_; name=nothing, compression_type=nothing)
+ if tf.in_eager_mode()
+ decode_compressed_eager(bytes_; name=name, compression_type=compression_type)
+ else
+ decode_compressed_graph(bytes_; name=name, compression_type=compression_type)
+ end
+ end
+ end
+end
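+# Note: `DecodeCompressed` decompresses each element of a string tensor;
+# `compression_type` must be `""` (no compression), `"ZLIB"`, or `"GZIP"`.
+# A minimal eager-mode sketch, where `payloads` stands for a hypothetical
+# string tensor of gzip-compressed records:
+#
+#     decode_compressed(payloads; compression_type="GZIP")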
+
+
+"""
+ get_session_tensor(handle)
+
+
+"""
+begin
+ begin
+ function get_session_tensor_graph(handle_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "GetSessionTensor") do
+ desc = tf.NodeDescription("GetSessionTensor")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function get_session_tensor_eager(handle_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("GetSessionTensor")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_tensor(handle_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ get_session_tensor_eager(handle_; name=name, dtype=dtype)
+ else
+ get_session_tensor_graph(handle_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_gather_v3(handle, indices, flow_in; element_shape=?)
+
+
+"""
+begin
+ begin
+ function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGatherV3") do
+ desc = tf.NodeDescription("TensorArrayGatherV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ desc = tf.EagerOp("TensorArrayGatherV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ if tf.in_eager_mode()
+ tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ else
+ tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ destroy_resource_op(resource; ignore_lookup_error=true)
+
+
+"""
+begin
+ begin
+ function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing)
+ local desc
+ tf.with_op_name(name, "DestroyResourceOp") do
+ desc = tf.NodeDescription("DestroyResourceOp")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ begin
+ if ignore_lookup_error !== nothing
+ desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing)
+ desc = tf.EagerOp("DestroyResourceOp")
+ resource_ = convert(tf.EagerTensor, resource_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ end
+ begin
+ begin
+ if ignore_lookup_error !== nothing
+ desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing)
+ if tf.in_eager_mode()
+ destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error)
+ else
+ destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters, accumulators, linears, gradient_accumulators; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ linears_ = convert(Tensor{Float32}, linears_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, linears_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ linears_ = convert(tf.EagerTensor, linears_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, linears_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
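+# Note: the `LoadTPUEmbedding*GradAccumDebug` ops write host tensors into a
+# TPU embedding table before training; this FTRL variant loads the optimizer
+# slot variables (`parameters`, `accumulators`, `linears`) together with the
+# debug gradient accumulators, addressing the table by `table_id` or
+# `table_name` and one shard of `num_shards` via `shard_id`.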
+
+
+"""
+ text_line_reader(; skip_header_lines=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "TextLineReader") do
+ desc = tf.NodeDescription("TextLineReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if skip_header_lines !== nothing
+ desc["skip_header_lines"] = Base.Int(skip_header_lines)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function text_line_reader_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("TextLineReader")
+ begin
+ end
+ begin
+ begin
+ if skip_header_lines !== nothing
+ desc["skip_header_lines"] = Base.Int(skip_header_lines)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
+ else
+ text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ create_summary_db_writer(writer, db_uri, experiment_name, run_name, user_name)
+
+
+"""
+begin
+ begin
+ function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CreateSummaryDbWriter") do
+ desc = tf.NodeDescription("CreateSummaryDbWriter")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ db_uri_ = convert(Tensor{String}, db_uri_)
+ begin
+ end
+ end
+ begin
+ experiment_name_ = convert(Tensor{String}, experiment_name_)
+ begin
+ end
+ end
+ begin
+ run_name_ = convert(Tensor{String}, run_name_)
+ begin
+ end
+ end
+ begin
+ user_name_ = convert(Tensor{String}, user_name_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, db_uri_)
+ end
+ begin
+ tf.add_input(desc, experiment_name_)
+ end
+ begin
+ tf.add_input(desc, run_name_)
+ end
+ begin
+ tf.add_input(desc, user_name_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
+ desc = tf.EagerOp("CreateSummaryDbWriter")
+ writer_ = convert(tf.EagerTensor, writer_)
+ db_uri_ = convert(tf.EagerTensor, db_uri_)
+ experiment_name_ = convert(tf.EagerTensor, experiment_name_)
+ run_name_ = convert(tf.EagerTensor, run_name_)
+ user_name_ = convert(tf.EagerTensor, user_name_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, db_uri_)
+ end
+ begin
+ tf.add_input(desc, experiment_name_)
+ end
+ begin
+ tf.add_input(desc, run_name_)
+ end
+ begin
+ tf.add_input(desc, user_name_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
+ if tf.in_eager_mode()
+ create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name)
+ else
+ create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tanh_grad(y, dy)
+
+
+"""
+begin
+ begin
+ function tanh_grad_graph(y_, dy_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TanhGrad") do
+ desc = tf.NodeDescription("TanhGrad")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (y_, dy_) = tf.tf_promote(y_, dy_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tanh_grad_eager(y_, dy_; name=nothing)
+ desc = tf.EagerOp("TanhGrad")
+ y_ = convert(tf.EagerTensor, y_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh_grad(y_, dy_; name=nothing)
+ if tf.in_eager_mode()
+ tanh_grad_eager(y_, dy_; name=name)
+ else
+ tanh_grad_graph(y_, dy_; name=name)
+ end
+ end
+ end
+end
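+# Note: `TanhGrad` computes the tanh backward pass from the forward output
+# `y` rather than the input `x`: the result is `dy * (1 - y^2)` elementwise.
+# A minimal eager-mode sketch:
+#
+#     y = tanh(constant([0.0, 1.0]))
+#     tanh_grad(y, constant([1.0, 1.0]))  # gradient of tanh at x = [0, 1]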
+
+
+"""
+ decode_base64(input)
+
+
+"""
+begin
+ begin
+ function decode_base64_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeBase64") do
+ desc = tf.NodeDescription("DecodeBase64")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_base64_eager(input_; name=nothing)
+ desc = tf.EagerOp("DecodeBase64")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_base64, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_base64(input_; name=nothing)
+ if tf.in_eager_mode()
+ decode_base64_eager(input_; name=name)
+ else
+ decode_base64_graph(input_; name=name)
+ end
+ end
+ end
+end
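+# Note: `DecodeBase64` decodes web-safe base64 (the `-`/`_` alphabet used by
+# `EncodeBase64`) elementwise over a string tensor. A minimal eager-mode
+# sketch:
+#
+#     decode_base64(constant("aGVsbG8"))  # "hello"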
+
+
+"""
+ max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=)
+
+
+"""
+begin
+ begin
+ function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolGradGradV2") do
+ desc = tf.NodeDescription("MaxPoolGradGradV2")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Any}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Any}, orig_output_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ ksize_ = convert(Tensor{Int32}, ksize_)
+ begin
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Int32}, strides_)
+ begin
+ end
+ end
+ begin
+ (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, ksize_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPoolGradGradV2")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ ksize_ = convert(tf.EagerTensor, ksize_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, ksize_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+ else
+ max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ audio_summary_v2(tag, tensor, sample_rate; max_outputs=3)
+
+
+"""
+begin
+ begin
+ function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+ local desc
+ tf.with_op_name(name, "AudioSummaryV2") do
+ desc = tf.NodeDescription("AudioSummaryV2")
+ begin
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Float32}, tensor_)
+ begin
+ end
+ end
+ begin
+ sample_rate_ = convert(Tensor{Float32}, sample_rate_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ begin
+ if max_outputs !== nothing
+ desc["max_outputs"] = Base.Int(max_outputs)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+ desc = tf.EagerOp("AudioSummaryV2")
+ tag_ = convert(tf.EagerTensor, tag_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ sample_rate_ = convert(tf.EagerTensor, sample_rate_)
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, sample_rate_)
+ end
+ end
+ begin
+ begin
+ if max_outputs !== nothing
+ desc["max_outputs"] = Base.Int(max_outputs)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+ if tf.in_eager_mode()
+ audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
+ else
+ audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
+ end
+ end
+ end
+end
+
+
+"""
+ stateful_partitioned_call(args; config=, config_proto=, executor_type=)
+
+
+"""
+begin
+ begin
+ function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+ local desc
+ tf.with_op_name(name, "StatefulPartitionedCall") do
+ desc = tf.NodeDescription("StatefulPartitionedCall")
+ begin
+ begin
+ args_ = [convert(Tensor{Any}, x) for x = args_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ begin
+ if config_proto !== nothing
+ desc["config_proto"] = Base.String(config_proto)
+ end
+ end
+ begin
+ if executor_type !== nothing
+ desc["executor_type"] = Base.String(executor_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+ desc = tf.EagerOp("StatefulPartitionedCall")
+ # `args` is a list input: convert each tensor, as in the graph wrapper above
+ args_ = [convert(tf.EagerTensor, x) for x = args_]
+ begin
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ begin
+ if config_proto !== nothing
+ desc["config_proto"] = Base.String(config_proto)
+ end
+ end
+ begin
+ if executor_type !== nothing
+ desc["executor_type"] = Base.String(executor_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+ if tf.in_eager_mode()
+ stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type)
+ else
+ stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type)
+ end
+ end
+ end
+end
+
+
+"""
+ _scoped_allocator_concat(backing, inputs; reshape=false)
+
+Acts like a Concat Op that merges multiple tensors into one, however it must
+"""
+begin
+ begin
+ function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "_ScopedAllocatorConcat") do
+ desc = tf.NodeDescription("_ScopedAllocatorConcat")
+ begin
+ begin
+ backing_ = convert(Tensor{Any}, backing_)
+ begin
+ end
+ end
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ (backing_, inputs_) = tf.tf_promote(backing_, inputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, backing_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if reshape !== nothing
+ desc["reshape"] = Base.Bool(reshape)
+ end
+ end
+ begin
+ if sa_name !== nothing
+ desc["sa_name"] = Base.String(sa_name)
+ end
+ end
+ begin
+ if id !== nothing
+ desc["id"] = Base.Int(id)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
+ desc = tf.EagerOp("_ScopedAllocatorConcat")
+ backing_ = convert(tf.EagerTensor, backing_)
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, backing_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if reshape !== nothing
+ desc["reshape"] = Base.Bool(reshape)
+ end
+ end
+ begin
+ if sa_name !== nothing
+ desc["sa_name"] = Base.String(sa_name)
+ end
+ end
+ begin
+ if id !== nothing
+ desc["id"] = Base.Int(id)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(backing_)
+ end
+ begin
+ desc["T"] = tf.data_type(inputs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
+ if tf.in_eager_mode()
+ _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N)
+ else
+ _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ fake_quant_with_min_max_args_gradient(gradients, inputs; min=?, max=?, num_bits=8, narrow_range=false)
+
+
+"""
+begin
+ begin
+ function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do
+ desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient")
+ begin
+ begin
+ gradients_ = convert(Tensor{Float32}, gradients_)
+ begin
+ end
+ end
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if min !== nothing
+ desc["min"] = Base.identity(min)
+ end
+ end
+ begin
+ if max !== nothing
+ desc["max"] = Base.identity(max)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+ desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if min !== nothing
+ desc["min"] = Base.identity(min)
+ end
+ end
+ begin
+ if max !== nothing
+ desc["max"] = Base.identity(max)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_quant_with_min_max_args_gradient, [gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+ if tf.in_eager_mode()
+ fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+ else
+ fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_svd(input; compute_uv=true, full_matrices=false)
+
+
+"""
+begin
+ begin
+ function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+ local desc
+ tf.with_op_name(name, "BatchSvd") do
+ desc = tf.NodeDescription("BatchSvd")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_uv !== nothing
+ desc["compute_uv"] = Base.Bool(compute_uv)
+ end
+ end
+ begin
+ if full_matrices !== nothing
+ desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+ desc = tf.EagerOp("BatchSvd")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_uv !== nothing
+ desc["compute_uv"] = Base.Bool(compute_uv)
+ end
+ end
+ begin
+ if full_matrices !== nothing
+ desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+ if tf.in_eager_mode()
+ batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices)
+ else
+ batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices)
+ end
+ end
+ end
+end
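+
+# Illustrative sketch (editorial, not generator output): `BatchSvd` has three
+# outputs, so the graph path above returns a vector of three tensors that can
+# be destructured. Assumes the surrounding `Ops` module and exported `constant`:
+#
+#     x = constant(randn(4, 3, 3))                 # batch of 3×3 matrices
+#     s, u, v = Ops.batch_svd(x, compute_uv=true)  # singular values, U, V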
+
+
+"""
+ map_stage(key, indices, values; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapStage") do
+ desc = tf.NodeDescription("MapStage")
+ begin
+ begin
+ key_ = convert(Tensor{Int64}, key_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if fake_dtypes !== nothing
+ desc["fake_dtypes"] = map(Base.identity, fake_dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapStage")
+ key_ = convert(tf.EagerTensor, key_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if fake_dtypes !== nothing
+ desc["fake_dtypes"] = map(Base.identity, fake_dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name)
+ else
+ map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyFtrl") do
+ desc = tf.NodeDescription("ResourceSparseApplyFtrl")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyFtrl")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ resize_nearest_neighbor(images, size; align_corners=false)
+
+
+"""
+begin
+ begin
+ function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeNearestNeighbor") do
+ desc = tf.NodeDescription("ResizeNearestNeighbor")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeNearestNeighbor")
+ images_ = convert(tf.EagerTensor, images_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners)
+ else
+ resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
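+
+# Sketch (editorial): `size` is converted to an Int32 tensor above, so a plain
+# Julia vector works. Hypothetical NHWC batch:
+#
+#     imgs = constant(rand(Float32, 1, 8, 8, 3))
+#     Ops.resize_nearest_neighbor(imgs, [16, 16])  # nearest-neighbor upsample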
+
+
+"""
+ experimental_csv_dataset(filenames, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols, record_defaults)
+
+
+"""
+begin
+ begin
+ function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalCSVDataset") do
+ desc = tf.NodeDescription("ExperimentalCSVDataset")
+ begin
+ begin
+ filenames_ = convert(Tensor{String}, filenames_)
+ begin
+ end
+ end
+ begin
+ compression_type_ = convert(Tensor{String}, compression_type_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ begin
+ header_ = convert(Tensor{Bool}, header_)
+ begin
+ end
+ end
+ begin
+ field_delim_ = convert(Tensor{String}, field_delim_)
+ begin
+ end
+ end
+ begin
+ use_quote_delim_ = convert(Tensor{Bool}, use_quote_delim_)
+ begin
+ end
+ end
+ begin
+ na_value_ = convert(Tensor{String}, na_value_)
+ begin
+ end
+ end
+ begin
+ select_cols_ = convert(Tensor{Int64}, select_cols_)
+ begin
+ end
+ end
+ begin
+ record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, header_)
+ end
+ begin
+ tf.add_input(desc, field_delim_)
+ end
+ begin
+ tf.add_input(desc, use_quote_delim_)
+ end
+ begin
+ tf.add_input(desc, na_value_)
+ end
+ begin
+ tf.add_input(desc, select_cols_)
+ end
+ begin
+ tf.add_input(desc, record_defaults_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalCSVDataset")
+ filenames_ = convert(tf.EagerTensor, filenames_)
+ compression_type_ = convert(tf.EagerTensor, compression_type_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ header_ = convert(tf.EagerTensor, header_)
+ field_delim_ = convert(tf.EagerTensor, field_delim_)
+ use_quote_delim_ = convert(tf.EagerTensor, use_quote_delim_)
+ na_value_ = convert(tf.EagerTensor, na_value_)
+ select_cols_ = convert(tf.EagerTensor, select_cols_)
+ record_defaults_ = convert(tf.EagerTensor, record_defaults_)
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, compression_type_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, header_)
+ end
+ begin
+ tf.add_input(desc, field_delim_)
+ end
+ begin
+ tf.add_input(desc, use_quote_delim_)
+ end
+ begin
+ tf.add_input(desc, na_value_)
+ end
+ begin
+ tf.add_input(desc, select_cols_)
+ end
+ begin
+ tf.add_input(desc, record_defaults_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ _mkl_mul(x, y, mkl_x, mkl_y)
+
+Returns x * y element-wise.
+"""
+begin
+ begin
+ function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_MklMul") do
+ desc = tf.NodeDescription("_MklMul")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ mkl_x_ = convert(Tensor{UInt8}, mkl_x_)
+ begin
+ end
+ end
+ begin
+ mkl_y_ = convert(Tensor{UInt8}, mkl_y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ desc = tf.EagerOp("_MklMul")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ mkl_x_ = convert(tf.EagerTensor, mkl_x_)
+ mkl_y_ = convert(tf.EagerTensor, mkl_y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ if tf.in_eager_mode()
+ _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name)
+ else
+ _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_diag(diagonal)
+
+
+"""
+begin
+ begin
+ function batch_matrix_diag_graph(diagonal_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixDiag") do
+ desc = tf.NodeDescription("BatchMatrixDiag")
+ begin
+ begin
+ diagonal_ = convert(Tensor{Any}, diagonal_)
+ begin
+ end
+ end
+ begin
+ (diagonal_,) = tf.tf_promote(diagonal_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_diag_eager(diagonal_; name=nothing)
+ desc = tf.EagerOp("BatchMatrixDiag")
+ diagonal_ = convert(tf.EagerTensor, diagonal_)
+ begin
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(diagonal_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag(diagonal_; name=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_diag_eager(diagonal_; name=name)
+ else
+ batch_matrix_diag_graph(diagonal_; name=name)
+ end
+ end
+ end
+end
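+
+# Sketch (editorial): turns a batch of diagonals into a batch of diagonal
+# matrices:
+#
+#     d = constant([1.0 2.0 3.0; 4.0 5.0 6.0])
+#     Ops.batch_matrix_diag(d)   # two 3×3 diagonal matrices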
+
+
+"""
+ is_inf(x)
+
+
+"""
+begin
+ begin
+ function is_inf_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IsInf") do
+ desc = tf.NodeDescription("IsInf")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function is_inf_eager(x_; name=nothing)
+ desc = tf.EagerOp("IsInf")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(is_inf, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_inf(x_; name=nothing)
+ if tf.in_eager_mode()
+ is_inf_eager(x_; name=name)
+ else
+ is_inf_graph(x_; name=name)
+ end
+ end
+ end
+end
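+
+# Sketch (editorial): elementwise test for ±Inf; the `tf.@op` wrapper above
+# picks the eager or graph builder automatically:
+#
+#     Ops.is_inf(constant([1.0, Inf, -Inf]))   # -> [false, true, true]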
+
+
+"""
+ fixed_unigram_candidate_sampler(true_classes; vocab_file="", distortion=?, num_reserved_ids=0, num_shards=1, shard=0, unigrams=Int64[], seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "FixedUnigramCandidateSampler") do
+ desc = tf.NodeDescription("FixedUnigramCandidateSampler")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if vocab_file !== nothing
+ desc["vocab_file"] = Base.String(vocab_file)
+ end
+ end
+ begin
+ if distortion !== nothing
+ desc["distortion"] = Base.identity(distortion)
+ end
+ end
+ begin
+ if num_reserved_ids !== nothing
+ desc["num_reserved_ids"] = Base.Int(num_reserved_ids)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard !== nothing
+ desc["shard"] = Base.Int(shard)
+ end
+ end
+ begin
+ if unigrams !== nothing
+ desc["unigrams"] = map(Base.identity, unigrams)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("FixedUnigramCandidateSampler")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if vocab_file !== nothing
+ desc["vocab_file"] = Base.String(vocab_file)
+ end
+ end
+ begin
+ if distortion !== nothing
+ desc["distortion"] = Base.identity(distortion)
+ end
+ end
+ begin
+ if num_reserved_ids !== nothing
+ desc["num_reserved_ids"] = Base.Int(num_reserved_ids)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard !== nothing
+ desc["shard"] = Base.Int(shard)
+ end
+ end
+ begin
+ if unigrams !== nothing
+ desc["unigrams"] = map(Base.identity, unigrams)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2)
+ else
+ fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
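+
+# Sketch (editorial): the sampler has three outputs (sampled candidates plus
+# the two expected-count tensors). `true_classes` and the `unigrams` weights
+# below are hypothetical values:
+#
+#     sampled, true_exp, samp_exp = Ops.fixed_unigram_candidate_sampler(
+#         true_classes, num_true=1, num_sampled=3, unique=true, range_max=5,
+#         unigrams=[0.4, 0.3, 0.1, 0.1, 0.1])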
+
+
+"""
+ unravel_index(indices, dims)
+
+
+"""
+begin
+ begin
+ function unravel_index_graph(indices_, dims_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnravelIndex") do
+ desc = tf.NodeDescription("UnravelIndex")
+ begin
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ dims_ = convert(Tensor{Int32}, dims_)
+ begin
+ end
+ end
+ begin
+ (indices_, dims_) = tf.tf_promote(indices_, dims_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, dims_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unravel_index_eager(indices_, dims_; name=nothing)
+ desc = tf.EagerOp("UnravelIndex")
+ indices_ = convert(tf.EagerTensor, indices_)
+ dims_ = convert(tf.EagerTensor, dims_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, dims_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(dims_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unravel_index(indices_, dims_; name=nothing)
+ if tf.in_eager_mode()
+ unravel_index_eager(indices_, dims_; name=name)
+ else
+ unravel_index_graph(indices_, dims_; name=name)
+ end
+ end
+ end
+end
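+
+# Sketch (editorial): the graph path shifts `indices` from Julia's 1-based
+# convention to TensorFlow's 0-based one; `dims` is the raw shape:
+#
+#     flat = constant([1, 6])
+#     Ops.unravel_index(flat, [3, 4])   # coordinates within a 3×4 shape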
+
+
+"""
+ sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyFtrlV2") do
+ desc = tf.NodeDescription("SparseApplyFtrlV2")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyFtrlV2")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(linear_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_shrinkage_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ max(input, reduction_indices; keep_dims=false)
+
+
+"""
+begin
+ begin
+ function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Max") do
+ desc = tf.NodeDescription("Max")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("Max")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
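+
+# Sketch (editorial): `reduction_indices` is accepted 1-based and shifted to
+# TensorFlow's 0-based axes above (graph path):
+#
+#     x = constant([1.0 2.0; 3.0 4.0])
+#     Ops.max(x, 2, keep_dims=false)   # reduce over the second (1-based) axis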
+
+
+"""
+ ifft2d(input)
+
+
+"""
+begin
+ begin
+ function ifft2d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IFFT2D") do
+ desc = tf.NodeDescription("IFFT2D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ifft2d_eager(input_; name=nothing)
+ desc = tf.EagerOp("IFFT2D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tcomplex"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ifft2d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft2d(input_; name=nothing)
+ if tf.in_eager_mode()
+ ifft2d_eager(input_; name=name)
+ else
+ ifft2d_graph(input_; name=name)
+ end
+ end
+ end
+end
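+
+# Sketch (editorial): the input is converted to Complex{Float32} above:
+#
+#     x = constant(Complex{Float32}.(rand(4, 4)))
+#     Ops.ifft2d(x)   # inverse 2-D FFT over the innermost two dimensions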
+
+
+"""
+ sparse_concat(indices, values, shapes)
+
+
+"""
+begin
+ begin
+ function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "SparseConcat") do
+ desc = tf.NodeDescription("SparseConcat")
+ begin
+ begin
+ indices_ = [convert(Tensor{Int64}, x) for x = indices_]
+ begin
+ end
+ end
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ shapes_ = [convert(Tensor{Int64}, x) for x = shapes_]
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shapes_)
+ end
+ end
+ begin
+ begin
+ if concat_dim !== nothing
+ concat_dim = Base.Int(concat_dim) - 1
+ end
+ end
+ begin
+ if concat_dim !== nothing
+ desc["concat_dim"] = Base.Int(concat_dim)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_concat_eager(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
+ desc = tf.EagerOp("SparseConcat")
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ shapes_ = convert(tf.EagerTensor, shapes_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shapes_)
+ end
+ end
+ begin
+ begin
+ if concat_dim !== nothing
+ concat_dim = Base.Int(concat_dim) - 1
+ end
+ end
+ begin
+ if concat_dim !== nothing
+ desc["concat_dim"] = Base.Int(concat_dim)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
+ if tf.in_eager_mode()
+ sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N)
+ else
+ sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N)
+ end
+ end
+ end
+end
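+
+# Sketch (editorial): `concat_dim` is taken 1-based and decremented above. The
+# three list arguments (COO indices, values, and dense shapes of the sparse
+# tensors being concatenated) are hypothetical placeholders here:
+#
+#     out_inds, out_vals, out_shape =
+#         Ops.sparse_concat(inds, vals, shps, concat_dim=1, N=2)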
+
+
+"""
+ histogram_summary(tag, values)
+
+
+"""
+begin
+ begin
+ function histogram_summary_graph(tag_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "HistogramSummary") do
+ desc = tf.NodeDescription("HistogramSummary")
+ begin
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Float32}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function histogram_summary_eager(tag_, values_; name=nothing)
+ desc = tf.EagerOp("HistogramSummary")
+ tag_ = convert(tf.EagerTensor, tag_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_summary(tag_, values_; name=nothing)
+ if tf.in_eager_mode()
+ histogram_summary_eager(tag_, values_; name=name)
+ else
+ histogram_summary_graph(tag_, values_; name=name)
+ end
+ end
+ end
+end
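+
+# Sketch (editorial): the tag is converted to a string tensor above, so a
+# plain Julia string works:
+#
+#     s = Ops.histogram_summary("weights", constant(randn(100)))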
+
+
+"""
+ segment_sum(data, segment_ids)
+
+
+"""
+begin
+ begin
+ function segment_sum_graph(data_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SegmentSum") do
+ desc = tf.NodeDescription("SegmentSum")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function segment_sum_eager(data_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SegmentSum")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_sum(data_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ segment_sum_eager(data_, segment_ids_; name=name)
+ else
+ segment_sum_graph(data_, segment_ids_; name=name)
+ end
+ end
+ end
+end
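+
+# Sketch (editorial): segment ids are 1-based on the Julia side (the graph
+# path shifts them above) and must be sorted:
+#
+#     data = constant([1.0, 2.0, 3.0, 4.0])
+#     ids  = constant([1, 1, 2, 2])
+#     run(Session(), Ops.segment_sum(data, ids))   # => [3.0, 7.0]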
+
+
+"""
+ exp(x)
+
+
+"""
+begin
+ begin
+ function exp_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Exp") do
+ desc = tf.NodeDescription("Exp")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function exp_eager(x_; name=nothing)
+ desc = tf.EagerOp("Exp")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(exp, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exp(x_; name=nothing)
+ if tf.in_eager_mode()
+ exp_eager(x_; name=name)
+ else
+ exp_graph(x_; name=name)
+ end
+ end
+ end
+end
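+
+# Sketch (editorial): the `tf.@op`-wrapped entry point dispatches on the global
+# execution mode — `exp_eager` when eager execution is enabled, `exp_graph`
+# otherwise:
+#
+#     enable_eager_execution()
+#     Ops.exp(constant([0.0, 1.0]))   # ≈ [1.0, 2.71828...]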
+
+
+"""
+ configure_distributed_tpu(; embedding_config="", tpu_embedding_config="", is_global_init=false)
+
+An op that sets up the centralized structures for a distributed TPU
+"""
+begin
+ begin
+ function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
+ local desc
+ tf.with_op_name(name, "ConfigureDistributedTPU") do
+ desc = tf.NodeDescription("ConfigureDistributedTPU")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if embedding_config !== nothing
+ desc["embedding_config"] = Base.String(embedding_config)
+ end
+ end
+ begin
+ if tpu_embedding_config !== nothing
+ desc["tpu_embedding_config"] = Base.String(tpu_embedding_config)
+ end
+ end
+ begin
+ if is_global_init !== nothing
+ desc["is_global_init"] = Base.Bool(is_global_init)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function configure_distributed_tpu_eager(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
+ desc = tf.EagerOp("ConfigureDistributedTPU")
+ begin
+ end
+ begin
+ begin
+ if embedding_config !== nothing
+ desc["embedding_config"] = Base.String(embedding_config)
+ end
+ end
+ begin
+ if tpu_embedding_config !== nothing
+ desc["tpu_embedding_config"] = Base.String(tpu_embedding_config)
+ end
+ end
+ begin
+ if is_global_init !== nothing
+ desc["is_global_init"] = Base.Bool(is_global_init)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
+ if tf.in_eager_mode()
+ configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init)
+ else
+ configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init)
+ end
+ end
+ end
+end
+
+
+"""
+ _xla_send_from_host(inputs, dynamic_key)
+
+A placeholder op for multiple values that will be sent from TensorFlow to a
+"""
+begin
+ begin
+ function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "_XlaSendFromHost") do
+ desc = tf.NodeDescription("_XlaSendFromHost")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ dynamic_key_ = convert(Tensor{String}, dynamic_key_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, dynamic_key_)
+ end
+ end
+ begin
+ begin
+ if Tinputs !== nothing
+ desc["Tinputs"] = map(Base.identity, Tinputs)
+ end
+ end
+ begin
+ if key !== nothing
+ desc["key"] = Base.String(key)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("_XlaSendFromHost")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ dynamic_key_ = convert(tf.EagerTensor, dynamic_key_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, dynamic_key_)
+ end
+ end
+ begin
+ begin
+ if Tinputs !== nothing
+ desc["Tinputs"] = map(Base.identity, Tinputs)
+ end
+ end
+ begin
+ if key !== nothing
+ desc["key"] = Base.String(key)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal)
+ else
+ _xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
+
+"""
+ get_session_handle_v2(value)
+
+
+"""
+begin
+ begin
+ function get_session_handle_v2_graph(value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GetSessionHandleV2") do
+ desc = tf.NodeDescription("GetSessionHandleV2")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function get_session_handle_v2_eager(value_; name=nothing)
+ desc = tf.EagerOp("GetSessionHandleV2")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle_v2(value_; name=nothing)
+ if tf.in_eager_mode()
+ get_session_handle_v2_eager(value_; name=name)
+ else
+ get_session_handle_v2_graph(value_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ relu_grad(gradients, features)
+
+
+"""
+begin
+ begin
+ function relu_grad_graph(gradients_, features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReluGrad") do
+ desc = tf.NodeDescription("ReluGrad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Any}, gradients_)
+ begin
+ end
+ end
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (gradients_, features_) = tf.tf_promote(gradients_, features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function relu_grad_eager(gradients_, features_; name=nothing)
+ desc = tf.EagerOp("ReluGrad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu_grad(gradients_, features_; name=nothing)
+ if tf.in_eager_mode()
+ relu_grad_eager(gradients_, features_; name=name)
+ else
+ relu_grad_graph(gradients_, features_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ unsorted_segment_min(data, segment_ids, num_segments)
+
+
+"""
+begin
+ begin
+ function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnsortedSegmentMin") do
+ desc = tf.NodeDescription("UnsortedSegmentMin")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("UnsortedSegmentMin")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name)
+ else
+ unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
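+
+# Sketch (editorial): unlike `segment_sum`, segment ids need not be sorted,
+# and the number of segments is passed explicitly:
+#
+#     data = constant([4.0, 3.0, 2.0, 1.0])
+#     ids  = constant([1, 2, 1, 2])            # 1-based (shifted above)
+#     Ops.unsorted_segment_min(data, ids, 2)   # => [2.0, 1.0]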
+
+
+"""
+ parse_example(serialized, names, sparse_keys, dense_keys, dense_defaults)
+
+
+"""
+begin
+ begin
+ function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ParseExample") do
+ desc = tf.NodeDescription("ParseExample")
+ begin
+ begin
+ serialized_ = convert(Tensor{String}, serialized_)
+ begin
+ end
+ end
+ begin
+ names_ = convert(Tensor{String}, names_)
+ begin
+ end
+ end
+ begin
+ sparse_keys_ = [convert(Tensor{String}, x) for x = sparse_keys_]
+ begin
+ end
+ end
+ begin
+ dense_keys_ = [convert(Tensor{String}, x) for x = dense_keys_]
+ begin
+ end
+ end
+ begin
+ dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, names_)
+ end
+ begin
+ tf.add_input(desc, sparse_keys_)
+ end
+ begin
+ tf.add_input(desc, dense_keys_)
+ end
+ begin
+ tf.add_input(desc, dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if Nsparse !== nothing
+ desc["Nsparse"] = Base.Int(Nsparse)
+ end
+ end
+ begin
+ if Ndense !== nothing
+ desc["Ndense"] = Base.Int(Ndense)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if Tdense !== nothing
+ desc["Tdense"] = map(Base.identity, Tdense)
+ end
+ end
+ begin
+ if dense_shapes !== nothing
+ desc["dense_shapes"] = map(Base.identity, dense_shapes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+ desc = tf.EagerOp("ParseExample")
+ serialized_ = convert(tf.EagerTensor, serialized_)
+ names_ = convert(tf.EagerTensor, names_)
+ sparse_keys_ = convert(tf.EagerTensor, sparse_keys_)
+ dense_keys_ = convert(tf.EagerTensor, dense_keys_)
+ dense_defaults_ = convert(tf.EagerTensor, dense_defaults_)
+ begin
+ begin
+ tf.add_input(desc, serialized_)
+ end
+ begin
+ tf.add_input(desc, names_)
+ end
+ begin
+ tf.add_input(desc, sparse_keys_)
+ end
+ begin
+ tf.add_input(desc, dense_keys_)
+ end
+ begin
+ tf.add_input(desc, dense_defaults_)
+ end
+ end
+ begin
+ begin
+ if Nsparse !== nothing
+ desc["Nsparse"] = Base.Int(Nsparse)
+ end
+ end
+ begin
+ if Ndense !== nothing
+ desc["Ndense"] = Base.Int(Ndense)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if Tdense !== nothing
+ desc["Tdense"] = map(Base.identity, Tdense)
+ end
+ end
+ begin
+ if dense_shapes !== nothing
+ desc["dense_shapes"] = map(Base.identity, dense_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+ if tf.in_eager_mode()
+ parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes)
+ else
+ parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_enqueue_v2(handle, components; timeout_ms=-1)
+
+
+"""
+begin
+ begin
+ function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueEnqueueV2") do
+ desc = tf.NodeDescription("QueueEnqueueV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueEnqueueV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ components_ = convert(tf.EagerTensor, components_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Tcomponents !== nothing
+ desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ else
+ queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_nd_add(ref, indices, updates; use_locking=false)
+
+
+"""
+begin
+ begin
+ function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterNdAdd") do
+ desc = tf.NodeDescription("ScatterNdAdd")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterNdAdd")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
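+
+# Usage sketch (editorial; assumes a `Variable` ref and graph mode — the
+# wrapper above shifts `indices` from 1-based to 0-based, one row per update):
+#
+#     sess = Session()
+#     v = Variable(zeros(4))
+#     run(sess, global_variables_initializer())
+#     idx = constant(reshape([1, 3], 2, 1))
+#     run(sess, Ops.scatter_nd_add(v, idx, constant([5.0, 7.0])))
+#     # v now holds [5.0, 0.0, 7.0, 0.0]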
+
+
+"""
+ reader_num_records_produced_v2(reader_handle)
+
+
+"""
+begin
+ begin
+ function reader_num_records_produced_v2_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderNumRecordsProducedV2") do
+ desc = tf.NodeDescription("ReaderNumRecordsProducedV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_num_records_produced_v2_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderNumRecordsProducedV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced_v2(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_num_records_produced_v2_eager(reader_handle_; name=name)
+ else
+ reader_num_records_produced_v2_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    load_tpu_embedding_centered_rms_prop_parameters(parameters, ms, mom, mg; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Float32}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Float32}, mom_)
+ begin
+ end
+ end
+ begin
+ mg_ = convert(Tensor{Float32}, mg_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ mg_ = convert(tf.EagerTensor, mg_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ assign_sub(ref, value; use_locking=false)
+
+
+"""
+begin
+ begin
+ function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "AssignSub") do
+ desc = tf.NodeDescription("AssignSub")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (ref_, value_) = tf.tf_promote(ref_, value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("AssignSub")
+ ref_ = convert(tf.EagerTensor, ref_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ assign_sub_eager(ref_, value_; name=name, use_locking=use_locking)
+ else
+ assign_sub_graph(ref_, value_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
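+
+# Usage sketch (editorial; assumes a graph-mode `Variable`):
+#
+#     sess = Session()
+#     v = Variable(10.0)
+#     run(sess, global_variables_initializer())
+#     run(sess, Ops.assign_sub(v, constant(3.0)))  # => 7.0; `v` now holds 7.0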
+
+
+"""
+ unsorted_segment_sum(data, segment_ids, num_segments)
+
+
+"""
+begin
+ begin
+ function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnsortedSegmentSum") do
+ desc = tf.NodeDescription("UnsortedSegmentSum")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("UnsortedSegmentSum")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name)
+ else
+ unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
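+
+# Usage sketch (editorial; same 1-based `segment_ids` convention as the other
+# graph-mode segment ops in this file):
+#
+#     sess = Session()
+#     data = constant([1.0, 2.0, 3.0, 4.0])
+#     ids  = constant([1, 1, 2, 2])
+#     run(sess, Ops.unsorted_segment_sum(data, ids, 2))  # => [3.0, 7.0]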
+
+
+"""
+ fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true)
+
+
+"""
+begin
+ begin
+ function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ local desc
+ tf.with_op_name(name, "FusedBatchNormGrad") do
+ desc = tf.NodeDescription("FusedBatchNormGrad")
+ begin
+ begin
+ y_backprop_ = convert(Tensor{Any}, y_backprop_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ scale_ = convert(Tensor{Any}, scale_)
+ begin
+ end
+ end
+ begin
+ reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_)
+ begin
+ end
+ end
+ begin
+ reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_)
+ begin
+ end
+ end
+ begin
+ (y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) = tf.tf_promote(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_backprop_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_1_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_2_)
+ end
+ end
+ begin
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ desc = tf.EagerOp("FusedBatchNormGrad")
+ y_backprop_ = convert(tf.EagerTensor, y_backprop_)
+ x_ = convert(tf.EagerTensor, x_)
+ scale_ = convert(tf.EagerTensor, scale_)
+ reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_)
+ reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_)
+ begin
+ begin
+ tf.add_input(desc, y_backprop_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_1_)
+ end
+ begin
+ tf.add_input(desc, reserve_space_2_)
+ end
+ end
+ begin
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(y_backprop_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(scale_)
+ end
+ begin
+ desc["T"] = tf.data_type(reserve_space_1_)
+ end
+ begin
+ desc["T"] = tf.data_type(reserve_space_2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ if tf.in_eager_mode()
+ fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ else
+ fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ end
+ end
+ end
+end
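+
+# Output-contract sketch (editorial; the argument names `dy`, `x`, `scale`,
+# `r1`, `r2` are placeholders): per the five-output loop above, graph mode
+# returns a Vector of five Tensors — the x/scale/offset backprops plus two
+# reserved outputs — which can be destructured directly:
+#
+#     dx, dscale, doffset, _, _ = Ops.fused_batch_norm_grad(dy, x, scale, r1, r2;
+#                                                           is_training=true)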
+
+
+"""
+ max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=)
+
+
+"""
+begin
+ begin
+ function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolGradV2") do
+ desc = tf.NodeDescription("MaxPoolGradV2")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Float32}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Float32}, orig_output_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Float32}, grad_)
+ begin
+ end
+ end
+ begin
+ ksize_ = convert(Tensor{Int32}, ksize_)
+ begin
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Int32}, strides_)
+ begin
+ end
+ end
+ begin
+ (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, ksize_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPoolGradV2")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ ksize_ = convert(tf.EagerTensor, ksize_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, ksize_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+ else
+ max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
+
+
+"""
+begin
+ begin
+ function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesCreateEnsemble") do
+ desc = tf.NodeDescription("BoostedTreesCreateEnsemble")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ stamp_token_ = convert(Tensor{Int64}, stamp_token_)
+ begin
+ end
+ end
+ begin
+ tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, stamp_token_)
+ end
+ begin
+ tf.add_input(desc, tree_ensemble_serialized_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+ desc = tf.EagerOp("BoostedTreesCreateEnsemble")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ stamp_token_ = convert(tf.EagerTensor, stamp_token_)
+ tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, stamp_token_)
+ end
+ begin
+ tf.add_input(desc, tree_ensemble_serialized_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name)
+ else
+ boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ ordered_map_incomplete_size(; capacity=0, memory_limit=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapIncompleteSize") do
+ desc = tf.NodeDescription("OrderedMapIncompleteSize")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ordered_map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapIncompleteSize")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ skipgram(; window_size=5, min_count=5, subsample=?)
+
+
+"""
+begin
+ begin
+ function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing)
+ local desc
+ tf.with_op_name(name, "Skipgram") do
+ desc = tf.NodeDescription("Skipgram")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if filename !== nothing
+ desc["filename"] = Base.String(filename)
+ end
+ end
+ begin
+ if batch_size !== nothing
+ desc["batch_size"] = Base.Int(batch_size)
+ end
+ end
+ begin
+ if window_size !== nothing
+ desc["window_size"] = Base.Int(window_size)
+ end
+ end
+ begin
+ if min_count !== nothing
+ desc["min_count"] = Base.Int(min_count)
+ end
+ end
+ begin
+ if subsample !== nothing
+ desc["subsample"] = Base.identity(subsample)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:7
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function skipgram_eager(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing)
+ desc = tf.EagerOp("Skipgram")
+ begin
+ end
+ begin
+ begin
+ if filename !== nothing
+ desc["filename"] = Base.String(filename)
+ end
+ end
+ begin
+ if batch_size !== nothing
+ desc["batch_size"] = Base.Int(batch_size)
+ end
+ end
+ begin
+ if window_size !== nothing
+ desc["window_size"] = Base.Int(window_size)
+ end
+ end
+ begin
+ if min_count !== nothing
+ desc["min_count"] = Base.Int(min_count)
+ end
+ end
+ begin
+ if subsample !== nothing
+ desc["subsample"] = Base.identity(subsample)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing)
+ if tf.in_eager_mode()
+ skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample)
+ else
+ skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample)
+ end
+ end
+ end
+end
+
+
+"""
+ arg_min(input, dimension; output_type=Int64)
+
+
+"""
+begin
+ begin
+ function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing)
+ local desc
+ tf.with_op_name(name, "ArgMin") do
+ desc = tf.NodeDescription("ArgMin")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ dimension_ = convert(Tensor{Int32}, dimension_)
+ begin
+ dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (dimension_,) = tf.tf_promote(dimension_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, dimension_)
+ end
+ end
+ begin
+ begin
+ if output_type !== nothing
+ desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing)
+ desc = tf.EagerOp("ArgMin")
+ input_ = convert(tf.EagerTensor, input_)
+ dimension_ = convert(tf.EagerTensor, dimension_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, dimension_)
+ end
+ end
+ begin
+ begin
+ if output_type !== nothing
+ desc["output_type"] = Base.identity(output_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(dimension_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing)
+ if tf.in_eager_mode()
+ arg_min_eager(input_, dimension_; name=name, output_type=output_type)
+ else
+ arg_min_graph(input_, dimension_; name=name, output_type=output_type)
+ end
+ end
+ end
+end
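+
+# Usage sketch (editorial): in graph mode `dimension` is accepted 1-based and
+# shifted by the wrapper above; the result is the kernel's 0-based position of
+# each minimum.
+#
+#     sess = Session()
+#     x = constant([4.0 2.0; 1.0 3.0])
+#     run(sess, Ops.arg_min(x, 1))  # 0-based row positions of the column minima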
+
+
+"""
+ queue_dequeue_many(handle, n; timeout_ms=-1)
+
+
+"""
+begin
+ begin
+ function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueDequeueMany") do
+ desc = tf.NodeDescription("QueueDequeueMany")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ n_ = convert(Tensor{Int32}, n_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueDequeueMany")
+ handle_ = convert(tf.EagerTensor, handle_)
+ n_ = convert(tf.EagerTensor, n_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ else
+ queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_serialize_ensemble(tree_ensemble_handle)
+
+
+"""
+begin
+ begin
+ function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do
+ desc = tf.NodeDescription("BoostedTreesSerializeEnsemble")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing)
+ desc = tf.EagerOp("BoostedTreesSerializeEnsemble")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name)
+ else
+ boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ minimum(x, y)
+
+
+"""
+begin
+ begin
+ function minimum_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Minimum") do
+ desc = tf.NodeDescription("Minimum")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function minimum_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Minimum")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(minimum, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function minimum(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ minimum_eager(x_, y_; name=name)
+ else
+ minimum_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
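+
+# Usage sketch (editorial): this is TensorFlow's element-wise Minimum op, not a
+# reduction over a single tensor.
+#
+#     sess = Session()
+#     run(sess, Ops.minimum(constant([1.0, 5.0]), constant([4.0, 2.0])))  # => [1.0, 2.0]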
+
+
+"""
+ substr(input, pos, len; unit=)
+
+
+"""
+begin
+ begin
+ function substr_graph(input_, pos_, len_; name=nothing, unit=nothing)
+ local desc
+ tf.with_op_name(name, "Substr") do
+ desc = tf.NodeDescription("Substr")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ begin
+ pos_ = convert(Tensor{Any}, pos_)
+ begin
+ end
+ end
+ begin
+ len_ = convert(Tensor{Any}, len_)
+ begin
+ end
+ end
+ begin
+ (pos_, len_) = tf.tf_promote(pos_, len_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, pos_)
+ end
+ begin
+ tf.add_input(desc, len_)
+ end
+ end
+ begin
+ begin
+ if unit !== nothing
+ desc["unit"] = Base.String(unit)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function substr_eager(input_, pos_, len_; name=nothing, unit=nothing)
+ desc = tf.EagerOp("Substr")
+ input_ = convert(tf.EagerTensor, input_)
+ pos_ = convert(tf.EagerTensor, pos_)
+ len_ = convert(tf.EagerTensor, len_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, pos_)
+ end
+ begin
+ tf.add_input(desc, len_)
+ end
+ end
+ begin
+ begin
+ if unit !== nothing
+ desc["unit"] = Base.String(unit)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(pos_)
+ end
+ begin
+ desc["T"] = tf.data_type(len_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, unit=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function substr(input_, pos_, len_; name=nothing, unit=nothing)
+ if tf.in_eager_mode()
+ substr_eager(input_, pos_, len_; name=name, unit=unit)
+ else
+ substr_graph(input_, pos_, len_; name=name, unit=unit)
+ end
+ end
+ end
+end
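+
+# Usage sketch (editorial): note that `pos` is forwarded to the kernel
+# unchanged, so it is 0-based, unlike the indices some wrappers in this file
+# shift from 1-based.
+#
+#     sess = Session()
+#     run(sess, Ops.substr(constant("hello"), constant(1), constant(3)))  # => "ell"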
+
+
+"""
+ queue_size(handle)
+
+
+"""
+begin
+ begin
+ function queue_size_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QueueSize") do
+ desc = tf.NodeDescription("QueueSize")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_size_eager(handle_; name=nothing)
+ desc = tf.EagerOp("QueueSize")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_size, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size(handle_; name=nothing)
+ if tf.in_eager_mode()
+ queue_size_eager(handle_; name=name)
+ else
+ queue_size_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false)
+
+
+"""
+begin
+ begin
+ function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyFtrlV2") do
+ desc = tf.NodeDescription("ApplyFtrlV2")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyFtrlV2")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(linear_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_shrinkage_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ else
+ apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_segment_mean(data, indices, segment_ids)
+
+
+"""
+begin
+ begin
+ function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentMean") do
+ desc = tf.NodeDescription("SparseSegmentMean")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentMean")
+ data_ = convert(tf.EagerTensor, data_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name)
+ else
+ sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name)
+ end
+ end
+ end
+end
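+
+# Usage sketch (editorial): per the conversions above, `indices` is shifted
+# from 1-based Julia indexing, while `segment_ids` is forwarded unchanged, so
+# it stays 0-based and must be sorted.
+#
+#     sess = Session()
+#     data = constant([1.0, 3.0, 5.0])
+#     run(sess, Ops.sparse_segment_mean(data, constant([2, 3]), constant([0, 0])))
+#     # mean of rows 2 and 3 in segment 0 => [4.0]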
+
+
+"""
+    load_tpu_embedding_momentum_parameters(parameters, momenta; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ momenta_ = convert(Tensor{Float32}, momenta_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ momenta_ = convert(tf.EagerTensor, momenta_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_proximal_adagrad(var, accum, lr, l1, l2, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyProximalAdagrad") do
+ desc = tf.NodeDescription("ResourceApplyProximalAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyProximalAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_gather_v2(handle, indices, flow_in; element_shape=?)
+
+
+"""
+begin
+ begin
+ function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGatherV2") do
+ desc = tf.NodeDescription("TensorArrayGatherV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ desc = tf.EagerOp("TensorArrayGatherV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ if tf.in_eager_mode()
+ tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ else
+ tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ less(x, y)
+
+
+"""
+begin
+ begin
+ function less_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Less") do
+ desc = tf.NodeDescription("Less")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function less_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Less")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(less, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ less_eager(x_, y_; name=name)
+ else
+ less_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
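+
+# Usage sketch (editorial): element-wise comparison yielding a Bool tensor.
+#
+#     sess = Session()
+#     run(sess, Ops.less(constant([1, 5]), constant([4, 2])))  # => [true, false]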
+
+
+"""
+ host_const()
+
+
+"""
+begin
+ begin
+ function host_const_graph(; name=nothing, value=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "HostConst") do
+ desc = tf.NodeDescription("HostConst")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if value !== nothing
+ desc["value"] = TensorFlow.RawTensor(value)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function host_const_eager(; name=nothing, value=nothing, dtype=nothing)
+ desc = tf.EagerOp("HostConst")
+ begin
+ end
+ begin
+ begin
+ if value !== nothing
+ desc["value"] = TensorFlow.RawTensor(value)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function host_const(; name=nothing, value=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ host_const_eager(; name=name, value=value, dtype=dtype)
+ else
+ host_const_graph(; name=name, value=value, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ upper_bound(sorted_inputs, values; out_type=Int32)
+
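+Wraps the TensorFlow `UpperBound` op: for each entry of `values`, returns the
+index of the first element in the corresponding row of `sorted_inputs` that is
+strictly greater than it. Note that this generated wrapper does not shift the
+result, so the returned insertion points follow TensorFlow's 0-based convention.
+
+A minimal sketch (hand-added; assumes eager execution has been enabled):
+
+    sorted = constant([1 3 5 7])
+    vals = constant([4 6])
+    upper_bound(sorted, vals)  # 0-based insertion points: [2 3]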
+
+"""
+begin
+ begin
+ function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "UpperBound") do
+ desc = tf.NodeDescription("UpperBound")
+ begin
+ begin
+ sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sorted_inputs_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("UpperBound")
+ sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, sorted_inputs_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(sorted_inputs_)
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type)
+ else
+ upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_get_item(input_handle, index)
+
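+Wraps the TensorFlow `TensorListGetItem` op: returns the element stored at
+position `index` of the tensor list `input_handle`, with dtype `element_dtype`.
+Note that `index` is passed through unshifted, so it follows TensorFlow's
+0-based convention.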
+
+"""
+begin
+ begin
+ function tensor_list_get_item_graph(input_handle_, index_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListGetItem") do
+ desc = tf.NodeDescription("TensorListGetItem")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_get_item_eager(input_handle_, index_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListGetItem")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_get_item(input_handle_, index_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_get_item_eager(input_handle_, index_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_get_item_graph(input_handle_, index_; name=name, element_dtype=element_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ fake_quant_with_min_max_vars(inputs, min, max; num_bits=8, narrow_range=false)
+
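+Wraps the TensorFlow `FakeQuantWithMinMaxVars` op: fake-quantizes `inputs` using
+the scalar range `[min, max]`, simulating `num_bits`-bit quantization (8 by
+default) as used in quantization-aware training.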
+
+"""
+begin
+ begin
+ function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQuantWithMinMaxVars") do
+ desc = tf.NodeDescription("FakeQuantWithMinMaxVars")
+ begin
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ min_ = convert(Tensor{Float32}, min_)
+ begin
+ end
+ end
+ begin
+ max_ = convert(Tensor{Float32}, max_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ desc = tf.EagerOp("FakeQuantWithMinMaxVars")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ min_ = convert(tf.EagerTensor, min_)
+ max_ = convert(tf.EagerTensor, max_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ if tf.in_eager_mode()
+ fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ else
+ fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ end
+ end
+ end
+end
+
+
+"""
+ is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle)
+
+
+"""
+begin
+ begin
+ function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do
+ desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized")
+ begin
+ begin
+ quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing)
+ desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized")
+ quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing)
+ if tf.in_eager_mode()
+ is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name)
+ else
+ is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_read_up_to_v2(reader_handle, queue_handle, num_records)
+
+
+"""
+begin
+ begin
+ function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderReadUpToV2") do
+ desc = tf.NodeDescription("ReaderReadUpToV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ begin
+ queue_handle_ = convert(Tensor{Any}, queue_handle_)
+ begin
+ end
+ end
+ begin
+ num_records_ = convert(Tensor{Int64}, num_records_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ begin
+ tf.add_input(desc, num_records_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing)
+ desc = tf.EagerOp("ReaderReadUpToV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ queue_handle_ = convert(tf.EagerTensor, queue_handle_)
+ num_records_ = convert(tf.EagerTensor, num_records_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ begin
+ tf.add_input(desc, num_records_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing)
+ if tf.in_eager_mode()
+ reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name)
+ else
+ reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ complex(real, imag)
+
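+Wraps the TensorFlow `Complex` op: combines two real tensors into one complex
+tensor, with `real` as the real part and `imag` as the imaginary part.
+
+A minimal sketch (hand-added; assumes eager execution has been enabled):
+
+    re = constant([1.0, 2.0])
+    imagpart = constant([3.0, 4.0])
+    complex(re, imagpart)  # ≈ [1.0 + 3.0im, 2.0 + 4.0im]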
+
+"""
+begin
+ begin
+ function complex_graph(real_, imag_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Complex") do
+ desc = tf.NodeDescription("Complex")
+ begin
+ begin
+ real_ = convert(Tensor{Float32}, real_)
+ begin
+ end
+ end
+ begin
+ imag_ = convert(Tensor{Float32}, imag_)
+ begin
+ end
+ end
+ begin
+ (real_, imag_) = tf.tf_promote(real_, imag_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, real_)
+ end
+ begin
+ tf.add_input(desc, imag_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function complex_eager(real_, imag_; name=nothing)
+ desc = tf.EagerOp("Complex")
+ real_ = convert(tf.EagerTensor, real_)
+ imag_ = convert(tf.EagerTensor, imag_)
+ begin
+ begin
+ tf.add_input(desc, real_)
+ end
+ begin
+ tf.add_input(desc, imag_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(real_)
+ end
+ begin
+ desc["T"] = tf.data_type(imag_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(complex, [real_, imag_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex(real_, imag_; name=nothing)
+ if tf.in_eager_mode()
+ complex_eager(real_, imag_; name=name)
+ else
+ complex_graph(real_, imag_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_reserve(element_shape, num_elements)
+
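+Wraps the TensorFlow `TensorListReserve` op: creates a tensor list with
+`num_elements` uninitialized slots for elements of shape `element_shape` and
+dtype `element_dtype`.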
+
+"""
+begin
+ begin
+ function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListReserve") do
+ desc = tf.NodeDescription("TensorListReserve")
+ begin
+ begin
+ element_shape_ = convert(Tensor{Any}, element_shape_)
+ begin
+ end
+ end
+ begin
+ num_elements_ = convert(Tensor{Int32}, num_elements_)
+ begin
+ end
+ end
+ begin
+ (element_shape_,) = tf.tf_promote(element_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ begin
+ tf.add_input(desc, num_elements_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ desc = tf.EagerOp("TensorListReserve")
+ element_shape_ = convert(tf.EagerTensor, element_shape_)
+ num_elements_ = convert(tf.EagerTensor, num_elements_)
+ begin
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ begin
+ tf.add_input(desc, num_elements_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ begin
+ desc["shape_type"] = tf.data_type(element_shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ if tf.in_eager_mode()
+ tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ else
+ tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ end
+ end
+ end
+end
+
+
+"""
+ bitcast(input)
+
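+Wraps the TensorFlow `Bitcast` op: reinterprets the bytes of `input` as the type
+given by the `type_` attribute without copying data. When the source and target
+element sizes differ, the innermost dimension is reshaped accordingly.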
+
+"""
+begin
+ begin
+ function bitcast_graph(input_; name=nothing, type_=nothing)
+ local desc
+ tf.with_op_name(name, "Bitcast") do
+ desc = tf.NodeDescription("Bitcast")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if type_ !== nothing
+ desc["type"] = Base.identity(type_)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bitcast_eager(input_; name=nothing, type_=nothing)
+ desc = tf.EagerOp("Bitcast")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if type_ !== nothing
+ desc["type"] = Base.identity(type_)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitcast(input_; name=nothing, type_=nothing)
+ if tf.in_eager_mode()
+ bitcast_eager(input_; name=name, type_=type_)
+ else
+ bitcast_graph(input_; name=name, type_=type_)
+ end
+ end
+ end
+end
+
+
+"""
+    priority_queue(; component_types=Int64[], capacity=-1, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "PriorityQueue") do
+ desc = tf.NodeDescription("PriorityQueue")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function priority_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("PriorityQueue")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(priority_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_batch_norm_with_global_normalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max)
+
+
+"""
+begin
+ begin
+ function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do
+ desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization")
+ begin
+ begin
+ t_ = convert(Tensor{Any}, t_)
+ begin
+ end
+ end
+ begin
+ t_min_ = convert(Tensor{Float32}, t_min_)
+ begin
+ end
+ end
+ begin
+ t_max_ = convert(Tensor{Float32}, t_max_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ m_min_ = convert(Tensor{Float32}, m_min_)
+ begin
+ end
+ end
+ begin
+ m_max_ = convert(Tensor{Float32}, m_max_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ v_min_ = convert(Tensor{Float32}, v_min_)
+ begin
+ end
+ end
+ begin
+ v_max_ = convert(Tensor{Float32}, v_max_)
+ begin
+ end
+ end
+ begin
+ beta_ = convert(Tensor{Any}, beta_)
+ begin
+ end
+ end
+ begin
+ beta_min_ = convert(Tensor{Float32}, beta_min_)
+ begin
+ end
+ end
+ begin
+ beta_max_ = convert(Tensor{Float32}, beta_max_)
+ begin
+ end
+ end
+ begin
+ gamma_ = convert(Tensor{Any}, gamma_)
+ begin
+ end
+ end
+ begin
+ gamma_min_ = convert(Tensor{Float32}, gamma_min_)
+ begin
+ end
+ end
+ begin
+ gamma_max_ = convert(Tensor{Float32}, gamma_max_)
+ begin
+ end
+ end
+ begin
+ (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, t_min_)
+ end
+ begin
+ tf.add_input(desc, t_max_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, m_min_)
+ end
+ begin
+ tf.add_input(desc, m_max_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, v_min_)
+ end
+ begin
+ tf.add_input(desc, v_max_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, beta_min_)
+ end
+ begin
+ tf.add_input(desc, beta_max_)
+ end
+ begin
+ tf.add_input(desc, gamma_)
+ end
+ begin
+ tf.add_input(desc, gamma_min_)
+ end
+ begin
+ tf.add_input(desc, gamma_max_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if scale_after_normalization !== nothing
+ desc["scale_after_normalization"] = Base.Bool(scale_after_normalization)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization")
+ t_ = convert(tf.EagerTensor, t_)
+ t_min_ = convert(tf.EagerTensor, t_min_)
+ t_max_ = convert(tf.EagerTensor, t_max_)
+ m_ = convert(tf.EagerTensor, m_)
+ m_min_ = convert(tf.EagerTensor, m_min_)
+ m_max_ = convert(tf.EagerTensor, m_max_)
+ v_ = convert(tf.EagerTensor, v_)
+ v_min_ = convert(tf.EagerTensor, v_min_)
+ v_max_ = convert(tf.EagerTensor, v_max_)
+ beta_ = convert(tf.EagerTensor, beta_)
+ beta_min_ = convert(tf.EagerTensor, beta_min_)
+ beta_max_ = convert(tf.EagerTensor, beta_max_)
+ gamma_ = convert(tf.EagerTensor, gamma_)
+ gamma_min_ = convert(tf.EagerTensor, gamma_min_)
+ gamma_max_ = convert(tf.EagerTensor, gamma_max_)
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, t_min_)
+ end
+ begin
+ tf.add_input(desc, t_max_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, m_min_)
+ end
+ begin
+ tf.add_input(desc, m_max_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, v_min_)
+ end
+ begin
+ tf.add_input(desc, v_max_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, beta_min_)
+ end
+ begin
+ tf.add_input(desc, beta_max_)
+ end
+ begin
+ tf.add_input(desc, gamma_)
+ end
+ begin
+ tf.add_input(desc, gamma_min_)
+ end
+ begin
+ tf.add_input(desc, gamma_max_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if scale_after_normalization !== nothing
+ desc["scale_after_normalization"] = Base.Bool(scale_after_normalization)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(t_)
+ end
+ begin
+ desc["Tinput"] = tf.data_type(m_)
+ end
+ begin
+ desc["Tinput"] = tf.data_type(v_)
+ end
+ begin
+ desc["Tinput"] = tf.data_type(beta_)
+ end
+ begin
+ desc["Tinput"] = tf.data_type(gamma_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ if tf.in_eager_mode()
+ quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
+ else
+ quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
+ end
+ end
+ end
+end
+
+
+"""
+ cos(x)
+
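+Wraps the TensorFlow `Cos` op: computes the element-wise cosine of `x`.
+
+A minimal sketch (hand-added; assumes eager execution has been enabled):
+
+    x = constant([0.0, 3.14159])
+    cos(x)  # ≈ [1.0, -1.0]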
+
+"""
+begin
+ begin
+ function cos_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Cos") do
+ desc = tf.NodeDescription("Cos")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cos_eager(x_; name=nothing)
+ desc = tf.EagerOp("Cos")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cos, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cos(x_; name=nothing)
+ if tf.in_eager_mode()
+ cos_eager(x_; name=name)
+ else
+ cos_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantize_down_and_shrink_range(input, input_min, input_max)
+
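+Wraps the TensorFlow `QuantizeDownAndShrinkRange` op: requantizes `input` (with
+float range `[input_min, input_max]`) into the lower-precision `out_type`,
+tightening the range to the values actually present. Returns the requantized
+tensor together with its new minimum and maximum.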
+
+"""
+begin
+ begin
+ function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizeDownAndShrinkRange") do
+ desc = tf.NodeDescription("QuantizeDownAndShrinkRange")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_min_ = convert(Tensor{Float32}, input_min_)
+ begin
+ end
+ end
+ begin
+ input_max_ = convert(Tensor{Float32}, input_max_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("QuantizeDownAndShrinkRange")
+ input_ = convert(tf.EagerTensor, input_)
+ input_min_ = convert(tf.EagerTensor, input_min_)
+ input_max_ = convert(tf.EagerTensor, input_max_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type)
+ else
+ quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_random_dataset(seed, seed2)
+
+
+"""
+begin
+ begin
+ function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalRandomDataset") do
+ desc = tf.NodeDescription("ExperimentalRandomDataset")
+ begin
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ seed2_ = convert(Tensor{Int64}, seed2_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, seed2_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalRandomDataset")
+ seed_ = convert(tf.EagerTensor, seed_)
+ seed2_ = convert(tf.EagerTensor, seed2_)
+ begin
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, seed2_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    rpc(address, method, request; protocol="", fail_fast=true, timeout_in_ms=0)
+
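+Wraps the TensorFlow `Rpc` op: sends `request` messages to `method` at `address`
+and returns the response messages. An empty `protocol` selects the default
+(gRPC), and `timeout_in_ms=0` means no timeout.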
+
+"""
+begin
+ begin
+ function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
+ local desc
+ tf.with_op_name(name, "Rpc") do
+ desc = tf.NodeDescription("Rpc")
+ begin
+ begin
+ address_ = convert(Tensor{String}, address_)
+ begin
+ end
+ end
+ begin
+ method_ = convert(Tensor{String}, method_)
+ begin
+ end
+ end
+ begin
+ request_ = convert(Tensor{String}, request_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, address_)
+ end
+ begin
+ tf.add_input(desc, method_)
+ end
+ begin
+ tf.add_input(desc, request_)
+ end
+ end
+ begin
+ begin
+ if protocol !== nothing
+ desc["protocol"] = Base.String(protocol)
+ end
+ end
+ begin
+ if fail_fast !== nothing
+ desc["fail_fast"] = Base.Bool(fail_fast)
+ end
+ end
+ begin
+ if timeout_in_ms !== nothing
+ desc["timeout_in_ms"] = Base.Int(timeout_in_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
+ desc = tf.EagerOp("Rpc")
+ address_ = convert(tf.EagerTensor, address_)
+ method_ = convert(tf.EagerTensor, method_)
+ request_ = convert(tf.EagerTensor, request_)
+ begin
+ begin
+ tf.add_input(desc, address_)
+ end
+ begin
+ tf.add_input(desc, method_)
+ end
+ begin
+ tf.add_input(desc, request_)
+ end
+ end
+ begin
+ begin
+ if protocol !== nothing
+ desc["protocol"] = Base.String(protocol)
+ end
+ end
+ begin
+ if fail_fast !== nothing
+ desc["fail_fast"] = Base.Bool(fail_fast)
+ end
+ end
+ begin
+ if timeout_in_ms !== nothing
+ desc["timeout_in_ms"] = Base.Int(timeout_in_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
+ if tf.in_eager_mode()
+ rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms)
+ else
+ rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_length(input_handle)
+
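+Wraps the TensorFlow `TensorListLength` op: returns the number of elements in
+the tensor list `input_handle` as an `Int32` scalar.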
+
+"""
+begin
+ begin
+ function tensor_list_length_graph(input_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListLength") do
+ desc = tf.NodeDescription("TensorListLength")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_length_eager(input_handle_; name=nothing)
+ desc = tf.EagerOp("TensorListLength")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_length(input_handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_list_length_eager(input_handle_; name=name)
+ else
+ tensor_list_length_graph(input_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    map_incomplete_size(; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapIncompleteSize") do
+ desc = tf.NodeDescription("MapIncompleteSize")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapIncompleteSize")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ stateless_while(input)
+
+
+"""
+begin
+ begin
+ function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessWhile") do
+ desc = tf.NodeDescription("StatelessWhile")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if cond !== nothing
+ desc["cond"] = Base.identity(cond)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
+ desc = tf.EagerOp("StatelessWhile")
+            input_ = [convert(tf.EagerTensor, x) for x = input_]  # hand-fixed: StatelessWhile takes a list of inputs; mirror the graph-mode wrapper
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if cond !== nothing
+ desc["cond"] = Base.identity(cond)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
+ if tf.in_eager_mode()
+ stateless_while_eager(input_; name=name, T=T, cond=cond, body=body)
+ else
+ stateless_while_graph(input_; name=name, T=T, cond=cond, body=body)
+ end
+ end
+ end
+end
+
+
+"""
+    sparse_conditional_accumulator(; container="", shared_name="", reduction_type="MEAN")
+
+
+"""
+begin
+ begin
+ function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
+ local desc
+ tf.with_op_name(name, "SparseConditionalAccumulator") do
+ desc = tf.NodeDescription("SparseConditionalAccumulator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if reduction_type !== nothing
+ desc["reduction_type"] = Base.String(reduction_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
+ desc = tf.EagerOp("SparseConditionalAccumulator")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if reduction_type !== nothing
+ desc["reduction_type"] = Base.String(reduction_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
+ if tf.in_eager_mode()
+ sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type)
+ else
+ sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type)
+ end
+ end
+ end
+end
+
+
+"""
+ segment_min(data, segment_ids)
+
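+Wraps the TensorFlow `SegmentMin` op: computes the minimum of `data` over each
+segment named by `segment_ids`. Unlike most wrappers in this file, the generated
+code shifts `segment_ids` down by one, so 1-based Julia segment ids are expected.
+
+A minimal sketch (hand-added; assumes eager execution has been enabled):
+
+    data = constant([5, 1, 7, 3])
+    ids = constant([1, 1, 2, 2])  # 1-based; shifted internally
+    segment_min(data, ids)  # [1, 3]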
+
+"""
+begin
+ begin
+ function segment_min_graph(data_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SegmentMin") do
+ desc = tf.NodeDescription("SegmentMin")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function segment_min_eager(data_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SegmentMin")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_min(data_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ segment_min_eager(data_, segment_ids_; name=name)
+ else
+ segment_min_graph(data_, segment_ids_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ write_graph_summary(writer, step, tensor)
+
+
+"""
+begin
+ begin
+ function write_graph_summary_graph(writer_, step_, tensor_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WriteGraphSummary") do
+ desc = tf.NodeDescription("WriteGraphSummary")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{String}, tensor_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_graph_summary_eager(writer_, step_, tensor_; name=nothing)
+ desc = tf.EagerOp("WriteGraphSummary")
+ writer_ = convert(tf.EagerTensor, writer_)
+ step_ = convert(tf.EagerTensor, step_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_graph_summary(writer_, step_, tensor_; name=nothing)
+ if tf.in_eager_mode()
+ write_graph_summary_eager(writer_, step_, tensor_; name=name)
+ else
+ write_graph_summary_graph(writer_, step_, tensor_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ cholesky_grad(l, grad)
+
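+Wraps the TensorFlow `CholeskyGrad` op: given the Cholesky factor `l` of a
+matrix and the gradient `grad` with respect to `l`, computes the reverse-mode
+gradient with respect to the original matrix.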
+
+"""
+begin
+ begin
+ function cholesky_grad_graph(l_, grad_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CholeskyGrad") do
+ desc = tf.NodeDescription("CholeskyGrad")
+ begin
+ begin
+ l_ = convert(Tensor{Any}, l_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (l_, grad_) = tf.tf_promote(l_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, l_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cholesky_grad_eager(l_, grad_; name=nothing)
+ desc = tf.EagerOp("CholeskyGrad")
+ l_ = convert(tf.EagerTensor, l_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, l_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(l_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky_grad(l_, grad_; name=nothing)
+ if tf.in_eager_mode()
+ cholesky_grad_eager(l_, grad_; name=name)
+ else
+ cholesky_grad_graph(l_, grad_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ log_uniform_candidate_sampler(true_classes; seed=0, seed2=0)
+
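+Wraps the TensorFlow `LogUniformCandidateSampler` op: draws `num_sampled`
+candidate ids from a log-uniform (Zipfian) distribution over `[0, range_max)`
+and returns them together with expected-count estimates for the true and
+sampled classes, as used in sampled-softmax training.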
+
+"""
+begin
+ begin
+ function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "LogUniformCandidateSampler") do
+ desc = tf.NodeDescription("LogUniformCandidateSampler")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("LogUniformCandidateSampler")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ else
+ log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ serialize_sparse(sparse_indices, sparse_values, sparse_shape; out_type=String)
+
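+Wraps the TensorFlow `SerializeSparse` op: serializes the sparse tensor given by
+`(sparse_indices, sparse_values, sparse_shape)` into a 3-element tensor of type
+`out_type` (`String` by default).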
+
+"""
+begin
+ begin
+ function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "SerializeSparse") do
+ desc = tf.NodeDescription("SerializeSparse")
+ begin
+ begin
+ sparse_indices_ = convert(Tensor{Int64}, sparse_indices_)
+ begin
+ end
+ end
+ begin
+ sparse_values_ = convert(Tensor{Any}, sparse_values_)
+ begin
+ end
+ end
+ begin
+ sparse_shape_ = convert(Tensor{Int64}, sparse_shape_)
+ begin
+ end
+ end
+ begin
+ (sparse_values_,) = tf.tf_promote(sparse_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("SerializeSparse")
+ sparse_indices_ = convert(tf.EagerTensor, sparse_indices_)
+ sparse_values_ = convert(tf.EagerTensor, sparse_values_)
+ sparse_shape_ = convert(tf.EagerTensor, sparse_shape_)
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(sparse_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type)
+ else
+ serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_nd_non_aliasing_add(input, indices, updates)
+
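+Wraps the TensorFlow `ScatterNdNonAliasingAdd` op: returns a new tensor equal to
+`input` with `updates` added at the positions given by `indices`, leaving
+`input` itself untouched (non-aliasing). The generated wrapper shifts `indices`
+down by one, so 1-based Julia indices are expected.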
+
+"""
+begin
+ begin
+ function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterNdNonAliasingAdd") do
+ desc = tf.NodeDescription("ScatterNdNonAliasingAdd")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (input_, updates_) = tf.tf_promote(input_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing)
+ desc = tf.EagerOp("ScatterNdNonAliasingAdd")
+ input_ = convert(tf.EagerTensor, input_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing)
+ if tf.in_eager_mode()
+ scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name)
+ else
+ scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ ref_merge(inputs)
+
+
+"""
+begin
+ begin
+ function ref_merge_graph(inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "RefMerge") do
+ desc = tf.NodeDescription("RefMerge")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ (inputs_,) = tf.tf_promote(inputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ref_merge_eager(inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("RefMerge")
+            inputs_ = [convert(tf.EagerTensor, x) for x = inputs_]  # hand-fixed: RefMerge takes a list of inputs; mirror the graph-mode wrapper
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+                desc["T"] = tf.data_type(inputs_[1])  # hand-fixed: take the dtype of an element, not of the list
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_merge(inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ ref_merge_eager(inputs_; name=name, N=N)
+ else
+ ref_merge_graph(inputs_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_concat(input_handle)
+
+
+"""
+begin
+ begin
+ function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListConcat") do
+ desc = tf.NodeDescription("TensorListConcat")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListConcat")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+    cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0.0, seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNNCanonicalToParams") do
+ desc = tf.NodeDescription("CudnnRNNCanonicalToParams")
+ begin
+ begin
+ num_layers_ = convert(Tensor{Int32}, num_layers_)
+ begin
+ end
+ end
+ begin
+ num_units_ = convert(Tensor{Int32}, num_units_)
+ begin
+ end
+ end
+ begin
+ input_size_ = convert(Tensor{Int32}, input_size_)
+ begin
+ end
+ end
+ begin
+ weights_ = [convert(Tensor{Any}, x) for x = weights_]
+ begin
+ end
+ end
+ begin
+ biases_ = [convert(Tensor{Any}, x) for x = biases_]
+ begin
+ end
+ end
+ begin
+ (weights_, biases_) = tf.tf_promote(weights_, biases_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, num_layers_)
+ end
+ begin
+ tf.add_input(desc, num_units_)
+ end
+ begin
+ tf.add_input(desc, input_size_)
+ end
+ begin
+ tf.add_input(desc, weights_)
+ end
+ begin
+ tf.add_input(desc, biases_)
+ end
+ end
+ begin
+ begin
+ if num_params !== nothing
+ desc["num_params"] = Base.Int(num_params)
+ end
+ end
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("CudnnRNNCanonicalToParams")
+ num_layers_ = convert(tf.EagerTensor, num_layers_)
+ num_units_ = convert(tf.EagerTensor, num_units_)
+ input_size_ = convert(tf.EagerTensor, input_size_)
+ weights_ = convert(tf.EagerTensor, weights_)
+ biases_ = convert(tf.EagerTensor, biases_)
+ begin
+ begin
+ tf.add_input(desc, num_layers_)
+ end
+ begin
+ tf.add_input(desc, num_units_)
+ end
+ begin
+ tf.add_input(desc, input_size_)
+ end
+ begin
+ tf.add_input(desc, weights_)
+ end
+ begin
+ tf.add_input(desc, biases_)
+ end
+ end
+ begin
+ begin
+ if num_params !== nothing
+ desc["num_params"] = Base.Int(num_params)
+ end
+ end
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(weights_)
+ end
+ begin
+ desc["T"] = tf.data_type(biases_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ else
+ cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyAdadelta") do
+ desc = tf.NodeDescription("SparseApplyAdadelta")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ accum_update_ = convert(Tensor{Any}, accum_update_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyAdadelta")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ accum_update_ = convert(tf.EagerTensor, accum_update_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_update_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=name, use_locking=use_locking, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
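+# Note: the graph builder above shifts `indices_` by -1, so callers can pass
+# 1-based Julia indices to the 0-based SparseApplyAdadelta kernel; the eager
+# variant performs no such shift, so eager callers currently must pass 0-based
+# indices. The same asymmetry appears in the other sparse_apply_* ops below.
+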
+
+"""
+ tensor_array_close(handle)
+
+
+"""
+begin
+ begin
+ function tensor_array_close_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayClose") do
+ desc = tf.NodeDescription("TensorArrayClose")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_close_eager(handle_; name=nothing)
+ desc = tf.EagerOp("TensorArrayClose")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(tensor_array_close, [handle_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close(handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_close_eager(handle_; name=name)
+ else
+ tensor_array_close_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ selu_grad(gradients, outputs)
+
+
+"""
+begin
+ begin
+ function selu_grad_graph(gradients_, outputs_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SeluGrad") do
+ desc = tf.NodeDescription("SeluGrad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Any}, gradients_)
+ begin
+ end
+ end
+ begin
+ outputs_ = convert(Tensor{Any}, outputs_)
+ begin
+ end
+ end
+ begin
+ (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, outputs_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function selu_grad_eager(gradients_, outputs_; name=nothing)
+ desc = tf.EagerOp("SeluGrad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ outputs_ = convert(tf.EagerTensor, outputs_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, outputs_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(outputs_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu_grad(gradients_, outputs_; name=nothing)
+ if tf.in_eager_mode()
+ selu_grad_eager(gradients_, outputs_; name=name)
+ else
+ selu_grad_graph(gradients_, outputs_; name=name)
+ end
+ end
+ end
+end
+
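+# Illustrative sketch (eager mode and the exported `Ops` module assumed):
+# `selu_grad` backpropagates through `selu`, taking the incoming gradient and
+# the forward outputs.
+#
+#     y = Ops.selu(constant(Float32[-1.0, 0.0, 1.0]))
+#     dx = Ops.selu_grad(constant(ones(Float32, 3)), y)
+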
+
+"""
+ crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=)
+
+
+"""
+begin
+ begin
+ function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing)
+ local desc
+ tf.with_op_name(name, "CropAndResizeGradImage") do
+ desc = tf.NodeDescription("CropAndResizeGradImage")
+ begin
+ begin
+ grads_ = convert(Tensor{Float32}, grads_)
+ begin
+ end
+ end
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ box_ind_ = convert(Tensor{Int32}, box_ind_)
+ begin
+ end
+ end
+ begin
+ image_size_ = convert(Tensor{Int32}, image_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, box_ind_)
+ end
+ begin
+ tf.add_input(desc, image_size_)
+ end
+ end
+ begin
+ begin
+ if method !== nothing
+ desc["method"] = Base.String(method)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing)
+ desc = tf.EagerOp("CropAndResizeGradImage")
+ grads_ = convert(tf.EagerTensor, grads_)
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ box_ind_ = convert(tf.EagerTensor, box_ind_)
+ image_size_ = convert(tf.EagerTensor, image_size_)
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, box_ind_)
+ end
+ begin
+ tf.add_input(desc, image_size_)
+ end
+ end
+ begin
+ begin
+ if method !== nothing
+ desc["method"] = Base.String(method)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=name, method=method, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing)
+ if tf.in_eager_mode()
+ crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method)
+ else
+ crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method)
+ end
+ end
+ end
+end
+
+
+"""
+ rfft(input, fft_length)
+
+
+"""
+begin
+ begin
+ function rfft_graph(input_, fft_length_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RFFT") do
+ desc = tf.NodeDescription("RFFT")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ fft_length_ = convert(Tensor{Int32}, fft_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rfft_eager(input_, fft_length_; name=nothing)
+ desc = tf.EagerOp("RFFT")
+ input_ = convert(tf.EagerTensor, input_)
+ fft_length_ = convert(tf.EagerTensor, fft_length_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(rfft, [input_, fft_length_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft(input_, fft_length_; name=nothing)
+ if tf.in_eager_mode()
+ rfft_eager(input_, fft_length_; name=name)
+ else
+ rfft_graph(input_, fft_length_; name=name)
+ end
+ end
+ end
+end
+
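+# Illustrative sketch (eager mode assumed): `fft_length` is a length-1 Int32
+# tensor giving the FFT size, and the result holds fft_length ÷ 2 + 1 complex
+# coefficients.
+#
+#     x = constant(rand(Float32, 8))
+#     spectrum = Ops.rfft(x, constant(Int32[8]))   # 5 complex values
+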
+
+"""
+ experimental_sql_dataset(driver_name, data_source_name, query)
+
+
+"""
+begin
+ begin
+ function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalSqlDataset") do
+ desc = tf.NodeDescription("ExperimentalSqlDataset")
+ begin
+ begin
+ driver_name_ = convert(Tensor{String}, driver_name_)
+ begin
+ end
+ end
+ begin
+ data_source_name_ = convert(Tensor{String}, data_source_name_)
+ begin
+ end
+ end
+ begin
+ query_ = convert(Tensor{String}, query_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, driver_name_)
+ end
+ begin
+ tf.add_input(desc, data_source_name_)
+ end
+ begin
+ tf.add_input(desc, query_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalSqlDataset")
+ driver_name_ = convert(tf.EagerTensor, driver_name_)
+ data_source_name_ = convert(tf.EagerTensor, data_source_name_)
+ query_ = convert(tf.EagerTensor, query_)
+ begin
+ begin
+ tf.add_input(desc, driver_name_)
+ end
+ begin
+ tf.add_input(desc, data_source_name_)
+ end
+ begin
+ tf.add_input(desc, query_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=name, output_types=output_types, output_shapes=output_shapes, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyPowerSign") do
+ desc = tf.NodeDescription("ResourceApplyPowerSign")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ logbase_ = convert(Tensor{Any}, logbase_)
+ begin
+ end
+ end
+ begin
+ sign_decay_ = convert(Tensor{Any}, sign_decay_)
+ begin
+ end
+ end
+ begin
+ beta_ = convert(Tensor{Any}, beta_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, logbase_, sign_decay_, beta_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, logbase_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyPowerSign")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ logbase_ = convert(tf.EagerTensor, logbase_)
+ sign_decay_ = convert(tf.EagerTensor, sign_decay_)
+ beta_ = convert(tf.EagerTensor, beta_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, logbase_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(logbase_)
+ end
+ begin
+ desc["T"] = tf.data_type(sign_decay_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=name, use_locking=use_locking, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ matrix_determinant(input)
+
+
+"""
+begin
+ begin
+ function matrix_determinant_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixDeterminant") do
+ desc = tf.NodeDescription("MatrixDeterminant")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_determinant_eager(input_; name=nothing)
+ desc = tf.EagerOp("MatrixDeterminant")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(matrix_determinant, [input_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_determinant(input_; name=nothing)
+ if tf.in_eager_mode()
+ matrix_determinant_eager(input_; name=name)
+ else
+ matrix_determinant_graph(input_; name=name)
+ end
+ end
+ end
+end
+
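+# Illustrative sketch (eager mode assumed):
+#
+#     A = constant([2.0 0.0; 0.0 3.0])
+#     Ops.matrix_determinant(A)   # ≈ 6.0
+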
+
+"""
+ static_regex_replace(input; replace_global=true)
+
+
+"""
+begin
+ begin
+ function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing)
+ local desc
+ tf.with_op_name(name, "StaticRegexReplace") do
+ desc = tf.NodeDescription("StaticRegexReplace")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if pattern !== nothing
+ desc["pattern"] = Base.String(pattern)
+ end
+ end
+ begin
+ if rewrite !== nothing
+ desc["rewrite"] = Base.String(rewrite)
+ end
+ end
+ begin
+ if replace_global !== nothing
+ desc["replace_global"] = Base.Bool(replace_global)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing)
+ desc = tf.EagerOp("StaticRegexReplace")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if pattern !== nothing
+ desc["pattern"] = Base.String(pattern)
+ end
+ end
+ begin
+ if rewrite !== nothing
+ desc["rewrite"] = Base.String(rewrite)
+ end
+ end
+ begin
+ if replace_global !== nothing
+ desc["replace_global"] = Base.Bool(replace_global)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(static_regex_replace, [input_], name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing)
+ if tf.in_eager_mode()
+ static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global)
+ else
+ static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global)
+ end
+ end
+ end
+end
+
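+# Illustrative sketch (eager mode assumed): `pattern` and `rewrite` are
+# compile-time attributes of the op, so they are passed as keyword arguments.
+#
+#     s = constant("aaa")
+#     Ops.static_regex_replace(s, pattern="a", rewrite="b")                        # "bbb"
+#     Ops.static_regex_replace(s, pattern="a", rewrite="b", replace_global=false)  # "baa"
+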
+
+"""
+ avg_pool(value; data_format=)
+
+
+"""
+begin
+ begin
+ function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "AvgPool") do
+ desc = tf.NodeDescription("AvgPool")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("AvgPool")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(avg_pool, [value_], name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
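+# Illustrative sketch (eager mode assumed): `ksize` and `strides` follow the
+# default NHWC layout, so a 2×2 window over a 4×4 single-channel image is
+#
+#     img = constant(rand(Float32, 1, 4, 4, 1))
+#     Ops.avg_pool(img, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")  # 1×2×2×1 output
+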
+
+"""
+ sparse_dense_cwise_add(sp_indices, sp_values, sp_shape, dense)
+
+
+"""
+begin
+ begin
+ function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseDenseCwiseAdd") do
+ desc = tf.NodeDescription("SparseDenseCwiseAdd")
+ begin
+ begin
+ sp_indices_ = convert(Tensor{Int64}, sp_indices_)
+ begin
+ end
+ end
+ begin
+ sp_values_ = convert(Tensor{Any}, sp_values_)
+ begin
+ end
+ end
+ begin
+ sp_shape_ = convert(Tensor{Int64}, sp_shape_)
+ begin
+ end
+ end
+ begin
+ dense_ = convert(Tensor{Any}, dense_)
+ begin
+ end
+ end
+ begin
+ (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ begin
+ tf.add_input(desc, dense_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ desc = tf.EagerOp("SparseDenseCwiseAdd")
+ sp_indices_ = convert(tf.EagerTensor, sp_indices_)
+ sp_values_ = convert(tf.EagerTensor, sp_values_)
+ sp_shape_ = convert(tf.EagerTensor, sp_shape_)
+ dense_ = convert(tf.EagerTensor, dense_)
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ begin
+ tf.add_input(desc, dense_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(sp_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(dense_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
+ else
+ sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ bias_add_v1(value, bias)
+
+
+"""
+begin
+ begin
+ function bias_add_v1_graph(value_, bias_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BiasAddV1") do
+ desc = tf.NodeDescription("BiasAddV1")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ bias_ = convert(Tensor{Any}, bias_)
+ begin
+ end
+ end
+ begin
+ (value_, bias_) = tf.tf_promote(value_, bias_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, bias_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bias_add_v1_eager(value_, bias_; name=nothing)
+ desc = tf.EagerOp("BiasAddV1")
+ value_ = convert(tf.EagerTensor, value_)
+ bias_ = convert(tf.EagerTensor, bias_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, bias_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ begin
+ desc["T"] = tf.data_type(bias_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(bias_add_v1, [value_, bias_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_v1(value_, bias_; name=nothing)
+ if tf.in_eager_mode()
+ bias_add_v1_eager(value_, bias_; name=name)
+ else
+ bias_add_v1_graph(value_, bias_; name=name)
+ end
+ end
+ end
+end
+
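+# Illustrative sketch (eager mode assumed): broadcasts a rank-1 bias over the
+# last dimension of `value`.
+#
+#     v = constant(zeros(Float32, 2, 3))
+#     b = constant(Float32[1, 2, 3])
+#     Ops.bias_add_v1(v, b)   # each row becomes [1, 2, 3]
+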
+
+"""
+ invert_permutation(x)
+
+
+"""
+begin
+ begin
+ function invert_permutation_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InvertPermutation") do
+ desc = tf.NodeDescription("InvertPermutation")
+ begin
+ begin
+ x_ = convert(Tensor{Int32}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function invert_permutation_eager(x_; name=nothing)
+ desc = tf.EagerOp("InvertPermutation")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(invert_permutation, [x_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert_permutation(x_; name=nothing)
+ if tf.in_eager_mode()
+ invert_permutation_eager(x_; name=name)
+ else
+ invert_permutation_graph(x_; name=name)
+ end
+ end
+ end
+end
+
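+# Illustrative sketch (eager mode assumed): the kernel works on 0-based
+# permutations (y[x[i]] = i in 0-based terms), and this wrapper applies no
+# index shifting.
+#
+#     p = constant(Int32[3, 4, 0, 2, 1])
+#     Ops.invert_permutation(p)   # [2, 4, 3, 0, 1]
+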
+
+"""
+ hash_table_v2(; container=, shared_name=, use_node_name_sharing=false)
+
+
+"""
+begin
+ begin
+ function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "HashTableV2") do
+ desc = tf.NodeDescription("HashTableV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ desc = tf.EagerOp("HashTableV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(hash_table_v2, [], name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
+ if tf.in_eager_mode()
+ hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ else
+ hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_apply_momentum(var, accum, lr, grad, indices, momentum; use_locking=false, use_nesterov=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyMomentum") do
+ desc = tf.NodeDescription("SparseApplyMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("SparseApplyMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=name, use_locking=use_locking, use_nesterov=use_nesterov, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
+ end
+end
+
+
+"""
+ infeed_enqueue(input; shape=?, device_ordinal=-1)
+
+An op which feeds a single Tensor value into the computation.
+"""
+begin
+ begin
+ function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "InfeedEnqueue") do
+ desc = tf.NodeDescription("InfeedEnqueue")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("InfeedEnqueue")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(infeed_enqueue, [input_], name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal)
+ else
+ infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
+
+"""
+ stateless_random_uniform_int(shape, seed, minval, maxval)
+
+
+"""
+begin
+ begin
+ function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessRandomUniformInt") do
+ desc = tf.NodeDescription("StatelessRandomUniformInt")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ minval_ = convert(Tensor{Any}, minval_)
+ begin
+ end
+ end
+ begin
+ maxval_ = convert(Tensor{Any}, maxval_)
+ begin
+ end
+ end
+ begin
+ (minval_, maxval_) = tf.tf_promote(minval_, maxval_)
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (seed_,) = tf.tf_promote(seed_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, minval_)
+ end
+ begin
+ tf.add_input(desc, maxval_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("StatelessRandomUniformInt")
+ shape_ = convert(tf.EagerTensor, shape_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ minval_ = convert(tf.EagerTensor, minval_)
+ maxval_ = convert(tf.EagerTensor, maxval_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, minval_)
+ end
+ begin
+ tf.add_input(desc, maxval_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ begin
+ desc["Tseed"] = tf.data_type(seed_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(minval_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(maxval_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], name=name, dtype=dtype, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype)
+ else
+ stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
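+# Illustrative sketch (eager mode assumed): the output is a pure function of
+# `shape`, `seed`, `minval`, and `maxval`, so repeating the call reproduces the
+# same integers.
+#
+#     shape = constant(Int32[3])
+#     seed = constant(Int64[1, 2])
+#     Ops.stateless_random_uniform_int(shape, seed, constant(Int64(0)), constant(Int64(10)))
+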
+
+"""
+ _send(tensor; client_terminated=false)
+
+Sends the named tensor from send_device to recv_device.
+"""
+begin
+ begin
+ function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ local desc
+ tf.with_op_name(name, "_Send") do
+ desc = tf.NodeDescription("_Send")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ desc = tf.EagerOp("_Send")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(_send, [tensor_], name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ if tf.in_eager_mode()
+ _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ else
+ _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ end
+ end
+ end
+end
+
+
+"""
+    load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters, accumulators, updates, gradient_accumulators; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Float32}, updates_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ map_peek(key, indices; capacity=0, memory_limit=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapPeek") do
+ desc = tf.NodeDescription("MapPeek")
+ begin
+ begin
+ key_ = convert(Tensor{Int64}, key_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapPeek")
+ key_ = convert(tf.EagerTensor, key_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(map_peek, [key_, indices_], name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ write_scalar_summary(writer, step, tag, value)
+
+
+"""
+begin
+ begin
+ function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WriteScalarSummary") do
+ desc = tf.NodeDescription("WriteScalarSummary")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing)
+ desc = tf.EagerOp("WriteScalarSummary")
+ writer_ = convert(tf.EagerTensor, writer_)
+ step_ = convert(tf.EagerTensor, step_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_scalar_summary(writer_, step_, tag_, value_; name=nothing)
+ if tf.in_eager_mode()
+ write_scalar_summary_eager(writer_, step_, tag_, value_; name=name)
+ else
+ write_scalar_summary_graph(writer_, step_, tag_, value_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ ordered_map_unstage_no_key(indices; capacity=0, memory_limit=0, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapUnstageNoKey") do
+ desc = tf.NodeDescription("OrderedMapUnstageNoKey")
+ begin
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ordered_map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapUnstageNoKey")
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+            node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyCenteredRMSProp") do
+ desc = tf.NodeDescription("SparseApplyCenteredRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ mg_ = convert(Tensor{Any}, mg_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyCenteredRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ mg_ = convert(tf.EagerTensor, mg_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(mg_)
+ end
+ begin
+ desc["T"] = tf.data_type(ms_)
+ end
+ begin
+ desc["T"] = tf.data_type(mom_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
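+
+# A rough graph-mode usage sketch (illustrative names; `var`, `mg`, `ms`,
+# `mom` are variables of matching shape): applies a centered-RMSProp step to
+# the rows of `var` selected by `indices`. Note the wrapper shifts `indices`
+# down by one, so callers pass 1-based (Julia-style) indices.
+#
+#     step = sparse_apply_centered_rms_prop(var, mg, ms, mom,
+#         0.01, 0.9, 0.0, 1e-8, grad, indices)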
+
+
+"""
+    conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format="NDHWC", dilations=[1, 1, 1, 1, 1])
+
+
+"""
+begin
+ begin
+ function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv3DBackpropInputV2") do
+ desc = tf.NodeDescription("Conv3DBackpropInputV2")
+ begin
+ begin
+ input_sizes_ = convert(Tensor{Int32}, input_sizes_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_)
+ end
+ begin
+ (input_sizes_,) = tf.tf_promote(input_sizes_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_sizes_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv3DBackpropInputV2")
+ input_sizes_ = convert(tf.EagerTensor, input_sizes_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_sizes_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["Tshape"] = tf.data_type(input_sizes_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
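+
+# Usage sketch (illustrative names): computes the gradient of a 3-D
+# convolution with respect to its input. `input_sizes` is the shape of the
+# original input; `strides` and `padding` must match the forward op.
+#
+#     dx = conv3d_backprop_input_v2(input_sizes, filter, dy;
+#         strides=[1, 1, 1, 1, 1], padding="SAME")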
+
+
+"""
+    retrieve_tpu_embedding_proximal_adagrad_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ random_shuffle(value; seed=0, seed2=0)
+
+Randomly shuffles a tensor along its first dimension.
+"""
+begin
+ begin
+ function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "RandomShuffle") do
+ desc = tf.NodeDescription("RandomShuffle")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("RandomShuffle")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2)
+ else
+ random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
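+
+# A minimal sketch, assuming eager mode is enabled and the op is in scope:
+# permutes a tensor along its first dimension.
+#
+#     x = constant([1, 2, 3, 4])
+#     shuffled = random_shuffle(x; seed=42)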
+
+
+"""
+ uniform_candidate_sampler(true_classes; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "UniformCandidateSampler") do
+ desc = tf.NodeDescription("UniformCandidateSampler")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("UniformCandidateSampler")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ else
+ uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
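+
+# Sketch (illustrative values): draws `num_sampled` candidate ids uniformly
+# from [0, range_max) for sampled-softmax style losses; the three outputs are
+# the sampled ids and the expected counts for the true and sampled classes.
+#
+#     sampled, true_exp, sampled_exp = uniform_candidate_sampler(true_classes;
+#         num_true=1, num_sampled=64, unique=true, range_max=10000)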
+
+
+"""
+ tensor_array_split_v2(handle, value, lengths, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArraySplitV2") do
+ desc = tf.NodeDescription("TensorArraySplitV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ lengths_ = convert(Tensor{Int64}, lengths_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArraySplitV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ value_ = convert(tf.EagerTensor, value_)
+ lengths_ = convert(tf.EagerTensor, lengths_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, lengths_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name)
+ else
+ tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    mutable_dense_hash_table_v2(empty_key, deleted_key; container="", shared_name="", use_node_name_sharing=false, value_shape=?, initial_num_buckets=131072, max_load_factor=?)
+
+
+"""
+begin
+ begin
+ function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
+ local desc
+ tf.with_op_name(name, "MutableDenseHashTableV2") do
+ desc = tf.NodeDescription("MutableDenseHashTableV2")
+ begin
+ begin
+ empty_key_ = convert(Tensor{Any}, empty_key_)
+ begin
+ end
+ end
+ begin
+ deleted_key_ = convert(Tensor{Any}, deleted_key_)
+ begin
+ end
+ end
+ begin
+ (empty_key_, deleted_key_) = tf.tf_promote(empty_key_, deleted_key_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, empty_key_)
+ end
+ begin
+ tf.add_input(desc, deleted_key_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ begin
+ if initial_num_buckets !== nothing
+ desc["initial_num_buckets"] = Base.Int(initial_num_buckets)
+ end
+ end
+ begin
+ if max_load_factor !== nothing
+ desc["max_load_factor"] = Base.identity(max_load_factor)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
+ desc = tf.EagerOp("MutableDenseHashTableV2")
+ empty_key_ = convert(tf.EagerTensor, empty_key_)
+ deleted_key_ = convert(tf.EagerTensor, deleted_key_)
+ begin
+ begin
+ tf.add_input(desc, empty_key_)
+ end
+ begin
+ tf.add_input(desc, deleted_key_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ begin
+ if initial_num_buckets !== nothing
+ desc["initial_num_buckets"] = Base.Int(initial_num_buckets)
+ end
+ end
+ begin
+ if max_load_factor !== nothing
+ desc["max_load_factor"] = Base.identity(max_load_factor)
+ end
+ end
+ end
+ begin
+ desc["key_dtype"] = tf.data_type(empty_key_)
+ end
+ begin
+ desc["key_dtype"] = tf.data_type(deleted_key_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
+ if tf.in_eager_mode()
+ mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor)
+ else
+ mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor)
+ end
+ end
+ end
+end
+
+
+"""
+ draw_bounding_boxes(images, boxes)
+
+Draws bounding boxes on a batch of images.
+"""
+begin
+ begin
+ function draw_bounding_boxes_graph(images_, boxes_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DrawBoundingBoxes") do
+ desc = tf.NodeDescription("DrawBoundingBoxes")
+ begin
+ begin
+ images_ = convert(Tensor{Float32}, images_)
+ begin
+ end
+ end
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function draw_bounding_boxes_eager(images_, boxes_; name=nothing)
+ desc = tf.EagerOp("DrawBoundingBoxes")
+ images_ = convert(tf.EagerTensor, images_)
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(draw_bounding_boxes, [images_, boxes_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing)
+ if tf.in_eager_mode()
+ draw_bounding_boxes_eager(images_, boxes_; name=name)
+ else
+ draw_bounding_boxes_graph(images_, boxes_; name=name)
+ end
+ end
+ end
+end
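+
+# Sketch (illustrative names): `images` is a batch of float images and
+# `boxes` holds per-image boxes as [y_min, x_min, y_max, x_max] corners
+# normalized to [0, 1]; the result is `images` with box outlines drawn in.
+#
+#     out = draw_bounding_boxes(images, boxes)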
+
+
+"""
+ sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyProximalAdagrad") do
+ desc = tf.NodeDescription("SparseApplyProximalAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyProximalAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ range_dataset(start, stop, step)
+
+
+"""
+begin
+ begin
+ function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "RangeDataset") do
+ desc = tf.NodeDescription("RangeDataset")
+ begin
+ begin
+ start_ = convert(Tensor{Int64}, start_)
+ begin
+ end
+ end
+ begin
+ stop_ = convert(Tensor{Int64}, stop_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, stop_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("RangeDataset")
+ start_ = convert(tf.EagerTensor, start_)
+ stop_ = convert(tf.EagerTensor, stop_)
+ step_ = convert(tf.EagerTensor, step_)
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, stop_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
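+
+# Sketch (a guess at typical arguments; the dataset would be consumed through
+# the iterator ops elsewhere in this file): builds a dataset of Int64 scalars
+# from `start` (inclusive) to `stop` (exclusive) with the given `step`.
+#
+#     ds = range_dataset(constant(Int64(0)), constant(Int64(10)),
+#         constant(Int64(2)); output_types=[Int64], output_shapes=[[]])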
+
+
+"""
+ reader_restore_state_v2(reader_handle, state)
+
+
+"""
+begin
+ begin
+ function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderRestoreStateV2") do
+ desc = tf.NodeDescription("ReaderRestoreStateV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ begin
+ state_ = convert(Tensor{String}, state_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, state_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing)
+ desc = tf.EagerOp("ReaderRestoreStateV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ state_ = convert(tf.EagerTensor, state_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, state_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state_v2(reader_handle_, state_; name=nothing)
+ if tf.in_eager_mode()
+ reader_restore_state_v2_eager(reader_handle_, state_; name=name)
+ else
+ reader_restore_state_v2_graph(reader_handle_, state_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ top_kv2(input, k; sorted=true)
+
+Finds values and indices of the `k` largest elements along the last dimension.
+"""
+begin
+ begin
+ function top_kv2_graph(input_, k_; name=nothing, sorted=nothing)
+ local desc
+ tf.with_op_name(name, "TopKV2") do
+ desc = tf.NodeDescription("TopKV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ k_ = convert(Tensor{Int32}, k_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, k_)
+ end
+ end
+ begin
+ begin
+ if sorted !== nothing
+ desc["sorted"] = Base.Bool(sorted)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function top_kv2_eager(input_, k_; name=nothing, sorted=nothing)
+ desc = tf.EagerOp("TopKV2")
+ input_ = convert(tf.EagerTensor, input_)
+ k_ = convert(tf.EagerTensor, k_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, k_)
+ end
+ end
+ begin
+ begin
+ if sorted !== nothing
+ desc["sorted"] = Base.Bool(sorted)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing)
+ if tf.in_eager_mode()
+ top_kv2_eager(input_, k_; name=name, sorted=sorted)
+ else
+ top_kv2_graph(input_, k_; name=name, sorted=sorted)
+ end
+ end
+ end
+end
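+
+# Sketch: returns the `k` largest entries along the last dimension together
+# with their indices (two outputs).
+#
+#     vals, idx = top_kv2(constant([1.0, 4.0, 2.0, 3.0]), constant(Int32(2)))
+#     # vals ≈ [4.0, 3.0]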
+
+
+"""
+ atanh(x)
+
+Computes the inverse hyperbolic tangent of `x` element-wise.
+"""
+begin
+ begin
+ function atanh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Atanh") do
+ desc = tf.NodeDescription("Atanh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function atanh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Atanh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(atanh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atanh(x_; name=nothing)
+ if tf.in_eager_mode()
+ atanh_eager(x_; name=name)
+ else
+ atanh_graph(x_; name=name)
+ end
+ end
+ end
+end
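+
+# Sketch: element-wise inverse hyperbolic tangent, defined for |x| < 1.
+#
+#     y = atanh(constant([0.0, 0.5]))   # ≈ [0.0, 0.5493]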
+
+
+"""
+ debug_gradient_identity(input)
+
+
+"""
+begin
+ begin
+ function debug_gradient_identity_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DebugGradientIdentity") do
+ desc = tf.NodeDescription("DebugGradientIdentity")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function debug_gradient_identity_eager(input_; name=nothing)
+ desc = tf.EagerOp("DebugGradientIdentity")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_identity(input_; name=nothing)
+ if tf.in_eager_mode()
+ debug_gradient_identity_eager(input_; name=name)
+ else
+ debug_gradient_identity_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_add_grad(backprop_val_grad, a_indices, b_indices, sum_indices)
+
+The gradient operator for the SparseAdd op.
+"""
+begin
+ begin
+ function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseAddGrad") do
+ desc = tf.NodeDescription("SparseAddGrad")
+ begin
+ begin
+ backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_)
+ begin
+ end
+ end
+ begin
+ a_indices_ = convert(Tensor{Int64}, a_indices_)
+ begin
+ end
+ end
+ begin
+ b_indices_ = convert(Tensor{Int64}, b_indices_)
+ begin
+ end
+ end
+ begin
+ sum_indices_ = convert(Tensor{Int64}, sum_indices_)
+ begin
+ end
+ end
+ begin
+ (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, backprop_val_grad_)
+ end
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, sum_indices_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
+ desc = tf.EagerOp("SparseAddGrad")
+ backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_)
+ a_indices_ = convert(tf.EagerTensor, a_indices_)
+ b_indices_ = convert(tf.EagerTensor, b_indices_)
+ sum_indices_ = convert(tf.EagerTensor, sum_indices_)
+ begin
+ begin
+ tf.add_input(desc, backprop_val_grad_)
+ end
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, sum_indices_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(backprop_val_grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name)
+ else
+ sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_scatter_add(resource, indices, updates)
+
+
+"""
+begin
+ begin
+ function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterAdd") do
+ desc = tf.NodeDescription("ResourceScatterAdd")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterAdd")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
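+
+# Sketch (illustrative names; `v` is a resource-variable handle): adds the
+# rows of `updates` into the rows of `v` selected by `indices`. The wrapper
+# subtracts one from `indices`, so callers use 1-based indices.
+#
+#     resource_scatter_add(v, constant([1, 3]), updates)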
+
+
+"""
+ ceil(x)
+
+Returns element-wise smallest integer not less than `x`.
+"""
+begin
+ begin
+ function ceil_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Ceil") do
+ desc = tf.NodeDescription("Ceil")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ceil_eager(x_; name=nothing)
+ desc = tf.EagerOp("Ceil")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ceil, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ceil(x_; name=nothing)
+ if tf.in_eager_mode()
+ ceil_eager(x_; name=name)
+ else
+ ceil_graph(x_; name=name)
+ end
+ end
+ end
+end
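+
+# Sketch: element-wise ceiling, preserving the floating-point dtype.
+#
+#     y = ceil(constant([1.2, -0.5]))   # [2.0, -0.0]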
+
+
+"""
+ save(filename, tensor_names, data)
+
+
+"""
+begin
+ begin
+ function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing)
+ local desc
+ tf.with_op_name(name, "Save") do
+ desc = tf.NodeDescription("Save")
+ begin
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ begin
+ tensor_names_ = convert(Tensor{String}, tensor_names_)
+ begin
+ end
+ end
+ begin
+ data_ = [convert(Tensor{Any}, x) for x = data_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing)
+ desc = tf.EagerOp("Save")
+ filename_ = convert(tf.EagerTensor, filename_)
+ tensor_names_ = convert(tf.EagerTensor, tensor_names_)
+            data_ = [convert(tf.EagerTensor, x) for x = data_]  # `data` is a tensor list input; convert element-wise
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, T=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save(filename_, tensor_names_, data_; name=nothing, T=nothing)
+ if tf.in_eager_mode()
+ save_eager(filename_, tensor_names_, data_; name=name, T=T)
+ else
+ save_graph(filename_, tensor_names_, data_; name=name, T=T)
+ end
+ end
+ end
+end
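+
+# Graph-mode sketch (illustrative names; `sess` is an open Session): writes
+# the listed tensors to a checkpoint file under the given names, in the
+# legacy `Save` format.
+#
+#     op = save(constant("weights.ckpt"), constant(["w", "b"]), [w, b])
+#     run(sess, op)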
+
+
+"""
+    retrieve_tpu_embedding_centered_rms_prop_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingCenteredRMSPropParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_concat(concat_dim, values, input_mins, input_maxes)
+
+
+"""
+begin
+ begin
+ function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedConcat") do
+ desc = tf.NodeDescription("QuantizedConcat")
+ begin
+ begin
+ concat_dim_ = convert(Tensor{Int32}, concat_dim_)
+ begin
+ end
+ end
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ input_mins_ = [convert(Tensor{Float32}, x) for x = input_mins_]
+ begin
+ end
+ end
+ begin
+ input_maxes_ = [convert(Tensor{Float32}, x) for x = input_maxes_]
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, concat_dim_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, input_mins_)
+ end
+ begin
+ tf.add_input(desc, input_maxes_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
+ desc = tf.EagerOp("QuantizedConcat")
+ concat_dim_ = convert(tf.EagerTensor, concat_dim_)
+            values_ = [convert(tf.EagerTensor, x) for x = values_]  # list inputs: convert element-wise
+            input_mins_ = [convert(tf.EagerTensor, x) for x = input_mins_]
+            input_maxes_ = [convert(tf.EagerTensor, x) for x = input_maxes_]
+ begin
+ begin
+ tf.add_input(desc, concat_dim_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, input_mins_)
+ end
+ begin
+ tf.add_input(desc, input_maxes_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N)
+ else
+ quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ zeros_like(x)
+
+Returns a tensor of zeros with the same shape and type as `x`.
+"""
+begin
+ begin
+ function zeros_like_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ZerosLike") do
+ desc = tf.NodeDescription("ZerosLike")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function zeros_like_eager(x_; name=nothing)
+ desc = tf.EagerOp("ZerosLike")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(zeros_like, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeros_like(x_; name=nothing)
+ if tf.in_eager_mode()
+ zeros_like_eager(x_; name=name)
+ else
+ zeros_like_graph(x_; name=name)
+ end
+ end
+ end
+end
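+
+# Sketch: a tensor of zeros with the same shape and dtype as the input.
+#
+#     z = zeros_like(constant([1.0 2.0; 3.0 4.0]))   # 2×2 matrix of zeros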
+
+
+"""
+ fractional_avg_pool(value; pseudo_random=false, overlapping=false, deterministic=false, seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "FractionalAvgPool") do
+ desc = tf.NodeDescription("FractionalAvgPool")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if pooling_ratio !== nothing
+ desc["pooling_ratio"] = map(Base.identity, pooling_ratio)
+ end
+ end
+ begin
+ if pseudo_random !== nothing
+ desc["pseudo_random"] = Base.Bool(pseudo_random)
+ end
+ end
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ begin
+ if deterministic !== nothing
+ desc["deterministic"] = Base.Bool(deterministic)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("FractionalAvgPool")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if pooling_ratio !== nothing
+ desc["pooling_ratio"] = map(Base.identity, pooling_ratio)
+ end
+ end
+ begin
+ if pseudo_random !== nothing
+ desc["pseudo_random"] = Base.Bool(pseudo_random)
+ end
+ end
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ begin
+ if deterministic !== nothing
+ desc["deterministic"] = Base.Bool(deterministic)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fractional_avg_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
+ else
+ fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
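+
+# Sketch (illustrative ratios): fractional average pooling over an NHWC
+# input. `pooling_ratio` has one entry per dimension, and the batch and
+# channel entries must be 1.0; the op returns the pooled tensor plus the row
+# and column pooling sequences.
+#
+#     y, rows, cols = fractional_avg_pool(x; pooling_ratio=[1.0, 1.44, 1.73, 1.0])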
+
+
+"""
+ edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape; normalize=true)
+
+Computes the (possibly normalized) Levenshtein edit distance between sparse hypothesis and truth sequences.
+"""
+begin
+ begin
+ function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
+ local desc
+ tf.with_op_name(name, "EditDistance") do
+ desc = tf.NodeDescription("EditDistance")
+ begin
+ begin
+ hypothesis_indices_ = convert(Tensor{Int64}, hypothesis_indices_)
+ begin
+ end
+ end
+ begin
+ hypothesis_values_ = convert(Tensor{Any}, hypothesis_values_)
+ begin
+ end
+ end
+ begin
+ hypothesis_shape_ = convert(Tensor{Int64}, hypothesis_shape_)
+ begin
+ end
+ end
+ begin
+ truth_indices_ = convert(Tensor{Int64}, truth_indices_)
+ begin
+ end
+ end
+ begin
+ truth_values_ = convert(Tensor{Any}, truth_values_)
+ begin
+ end
+ end
+ begin
+ truth_shape_ = convert(Tensor{Int64}, truth_shape_)
+ begin
+ end
+ end
+ begin
+ (hypothesis_values_, truth_values_) = tf.tf_promote(hypothesis_values_, truth_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, hypothesis_indices_)
+ end
+ begin
+ tf.add_input(desc, hypothesis_values_)
+ end
+ begin
+ tf.add_input(desc, hypothesis_shape_)
+ end
+ begin
+ tf.add_input(desc, truth_indices_)
+ end
+ begin
+ tf.add_input(desc, truth_values_)
+ end
+ begin
+ tf.add_input(desc, truth_shape_)
+ end
+ end
+ begin
+ begin
+ if normalize !== nothing
+ desc["normalize"] = Base.Bool(normalize)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
+ desc = tf.EagerOp("EditDistance")
+ hypothesis_indices_ = convert(tf.EagerTensor, hypothesis_indices_)
+ hypothesis_values_ = convert(tf.EagerTensor, hypothesis_values_)
+ hypothesis_shape_ = convert(tf.EagerTensor, hypothesis_shape_)
+ truth_indices_ = convert(tf.EagerTensor, truth_indices_)
+ truth_values_ = convert(tf.EagerTensor, truth_values_)
+ truth_shape_ = convert(tf.EagerTensor, truth_shape_)
+ begin
+ begin
+ tf.add_input(desc, hypothesis_indices_)
+ end
+ begin
+ tf.add_input(desc, hypothesis_values_)
+ end
+ begin
+ tf.add_input(desc, hypothesis_shape_)
+ end
+ begin
+ tf.add_input(desc, truth_indices_)
+ end
+ begin
+ tf.add_input(desc, truth_values_)
+ end
+ begin
+ tf.add_input(desc, truth_shape_)
+ end
+ end
+ begin
+ begin
+ if normalize !== nothing
+ desc["normalize"] = Base.Bool(normalize)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(hypothesis_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(truth_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
+ if tf.in_eager_mode()
+ edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize)
+ else
+ edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize)
+ end
+ end
+ end
+end
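+
+# Sketch (illustrative names): hypothesis and truth are each given as
+# SparseTensor components (indices, values, dense shape); with
+# normalize=true the distance is divided by the truth length.
+#
+#     d = edit_distance(h_idx, h_vals, h_shape, t_idx, t_vals, t_shape;
+#         normalize=true)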
+
+
+"""
+ unique_v2(x, axis; out_idx=Int32)
+
+Finds unique elements along an axis of a tensor.
+"""
+begin
+ begin
+ function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing)
+ local desc
+ tf.with_op_name(name, "UniqueV2") do
+ desc = tf.NodeDescription("UniqueV2")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Int64}, axis_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing)
+ desc = tf.EagerOp("UniqueV2")
+ x_ = convert(tf.EagerTensor, x_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["Taxis"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_v2(x_, axis_; name=nothing, out_idx=nothing)
+ if tf.in_eager_mode()
+ unique_v2_eager(x_, axis_; name=name, out_idx=out_idx)
+ else
+ unique_v2_graph(x_, axis_; name=name, out_idx=out_idx)
+ end
+ end
+ end
+end
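+
+# Sketch: unique elements of `x` along the axis given by the `axis` tensor
+# (TensorFlow's 0-based axis convention), plus, for each input element, its
+# position in the deduplicated output.
+#
+#     y, idx = unique_v2(constant([1, 1, 2, 3, 3]), constant(Int64[0]))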
+
+
+"""
+    quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode="HALF_TO_EVEN")
+
+
+"""
+begin
+ begin
+ function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizeAndDequantizeV2") do
+ desc = tf.NodeDescription("QuantizeAndDequantizeV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ input_min_ = convert(Tensor{Any}, input_min_)
+ begin
+ end
+ end
+ begin
+ input_max_ = convert(Tensor{Any}, input_max_)
+ begin
+ end
+ end
+ begin
+ (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ begin
+ if signed_input !== nothing
+ desc["signed_input"] = Base.Bool(signed_input)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if range_given !== nothing
+ desc["range_given"] = Base.Bool(range_given)
+ end
+ end
+ begin
+ if round_mode !== nothing
+ desc["round_mode"] = Base.String(round_mode)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
+ desc = tf.EagerOp("QuantizeAndDequantizeV2")
+ input_ = convert(tf.EagerTensor, input_)
+ input_min_ = convert(tf.EagerTensor, input_min_)
+ input_max_ = convert(tf.EagerTensor, input_max_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ end
+ begin
+ begin
+ if signed_input !== nothing
+ desc["signed_input"] = Base.Bool(signed_input)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if range_given !== nothing
+ desc["range_given"] = Base.Bool(range_given)
+ end
+ end
+ begin
+ if round_mode !== nothing
+ desc["round_mode"] = Base.String(round_mode)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_min_)
+ end
+ begin
+ desc["T"] = tf.data_type(input_max_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
+ if tf.in_eager_mode()
+ quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode)
+ else
+ quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode)
+ end
+ end
+ end
+end
+
+
+"""
+    quantize_and_dequantize(input; signed_input=true, num_bits=8, range_given=false, input_min=0.0, input_max=0.0)
+
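+Older variant of `quantize_and_dequantize_v2` that takes the quantization range
+as attributes rather than tensors; deprecated upstream in favor of the V2 op.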
+
+"""
+begin
+ begin
+ function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizeAndDequantize") do
+ desc = tf.NodeDescription("QuantizeAndDequantize")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if signed_input !== nothing
+ desc["signed_input"] = Base.Bool(signed_input)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if range_given !== nothing
+ desc["range_given"] = Base.Bool(range_given)
+ end
+ end
+ begin
+ if input_min !== nothing
+ desc["input_min"] = Base.identity(input_min)
+ end
+ end
+ begin
+ if input_max !== nothing
+ desc["input_max"] = Base.identity(input_max)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
+ desc = tf.EagerOp("QuantizeAndDequantize")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if signed_input !== nothing
+ desc["signed_input"] = Base.Bool(signed_input)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if range_given !== nothing
+ desc["range_given"] = Base.Bool(range_given)
+ end
+ end
+ begin
+ if input_min !== nothing
+ desc["input_min"] = Base.identity(input_min)
+ end
+ end
+ begin
+ if input_max !== nothing
+ desc["input_max"] = Base.identity(input_max)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
+ if tf.in_eager_mode()
+ quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max)
+ else
+ quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_pop_back(input_handle)
+
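+Returns the input list with its last element removed, together with that element
+as a tensor of type `element_dtype` (outputs `(output_handle, tensor)`).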
+
+"""
+begin
+ begin
+ function tensor_list_pop_back_graph(input_handle_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListPopBack") do
+ desc = tf.NodeDescription("TensorListPopBack")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_list_pop_back_eager(input_handle_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListPopBack")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_pop_back, [input_handle_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_pop_back(input_handle_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_pop_back_eager(input_handle_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_pop_back_graph(input_handle_; name=name, element_dtype=element_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+    debug_nan_count(input; device_name="", tensor_name="", debug_urls=String[], gated_grpc=false)
+
+Debug NaN value counter op: counts the NaN entries in `input` and emits the count
+as a scalar, as part of the TensorFlow debugger (tfdbg) instrumentation.
+"""
+begin
+ begin
+ function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+ local desc
+ tf.with_op_name(name, "DebugNanCount") do
+ desc = tf.NodeDescription("DebugNanCount")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+ desc = tf.EagerOp("DebugNanCount")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+ if tf.in_eager_mode()
+ debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc)
+ else
+ debug_nan_count_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step; use_locking=false)
+
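+Updates `var` following the proximal AdaGrad Dual Averaging (AdagradDA) scheme,
+accumulating gradients and squared gradients since training step `global_step`
+and applying `l1`/`l2` regularization.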
+
+"""
+begin
+ begin
+ function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyAdagradDA") do
+ desc = tf.NodeDescription("ApplyAdagradDA")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_)
+ begin
+ end
+ end
+ begin
+ gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ global_step_ = convert(Tensor{Int64}, global_step_)
+ begin
+ end
+ end
+ begin
+ (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyAdagradDA")
+ var_ = convert(tf.EagerTensor, var_)
+ gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_)
+ gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ global_step_ = convert(tf.EagerTensor, global_step_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(gradient_accumulator_)
+ end
+ begin
+ desc["T"] = tf.data_type(gradient_squared_accumulator_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ else
+ apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    depthwise_conv2d_native(input, filter; data_format="NHWC", dilations=[1, 1, 1, 1])
+
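+Computes a 2-D depthwise convolution: each input channel is convolved with its own
+stack of filters, so a `[filter_height, filter_width, in_channels, channel_multiplier]`
+filter produces `in_channels * channel_multiplier` output channels. `strides` and
+`padding` are required attributes of the underlying op.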
+
+"""
+begin
+ begin
+ function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "DepthwiseConv2dNative") do
+ desc = tf.NodeDescription("DepthwiseConv2dNative")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_) = tf.tf_promote(input_, filter_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("DepthwiseConv2dNative")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
+
+
+"""
+ serialize_iterator(resource_handle)
+
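+Serializes the state of the iterator behind `resource_handle` into a variant
+tensor, so it can later be restored with the corresponding deserialize op.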
+
+"""
+begin
+ begin
+ function serialize_iterator_graph(resource_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SerializeIterator") do
+ desc = tf.NodeDescription("SerializeIterator")
+ begin
+ begin
+ resource_handle_ = convert(Tensor{Any}, resource_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function serialize_iterator_eager(resource_handle_; name=nothing)
+ desc = tf.EagerOp("SerializeIterator")
+ resource_handle_ = convert(tf.EagerTensor, resource_handle_)
+ begin
+ begin
+ tf.add_input(desc, resource_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_iterator(resource_handle_; name=nothing)
+ if tf.in_eager_mode()
+ serialize_iterator_eager(resource_handle_; name=name)
+ else
+ serialize_iterator_graph(resource_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ dataset_to_graph(input_dataset)
+
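+Returns a serialized `GraphDef` (as a scalar string tensor) representing
+`input_dataset`.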
+
+"""
+begin
+ begin
+ function dataset_to_graph_graph(input_dataset_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DatasetToGraph") do
+ desc = tf.NodeDescription("DatasetToGraph")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dataset_to_graph_eager(input_dataset_; name=nothing)
+ desc = tf.EagerOp("DatasetToGraph")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_graph(input_dataset_; name=nothing)
+ if tf.in_eager_mode()
+ dataset_to_graph_eager(input_dataset_; name=name)
+ else
+ dataset_to_graph_graph(input_dataset_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ top_k(input; sorted=true)
+
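+Finds the values and indices of the `k` largest entries along the last dimension
+of `input`; `k` is a required attribute of the underlying (deprecated) `TopK` op,
+and `sorted=true` returns the values in descending order.
+
+A rough sketch of eager use (hypothetical; assumes the wrapper is exposed via the
+exported `Ops` module, and note the raw op returns 0-based indices):
+
+    using TensorFlow
+    enable_eager_execution()
+    values, indices = Ops.top_k(constant([1.0, 3.0, 2.0]), k=2)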
+
+"""
+begin
+ begin
+ function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing)
+ local desc
+ tf.with_op_name(name, "TopK") do
+ desc = tf.NodeDescription("TopK")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if k !== nothing
+ desc["k"] = Base.Int(k)
+ end
+ end
+ begin
+ if sorted !== nothing
+ desc["sorted"] = Base.Bool(sorted)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing)
+ desc = tf.EagerOp("TopK")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if k !== nothing
+ desc["k"] = Base.Int(k)
+ end
+ end
+ begin
+ if sorted !== nothing
+ desc["sorted"] = Base.Bool(sorted)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, sorted=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_k(input_; name=nothing, k=nothing, sorted=nothing)
+ if tf.in_eager_mode()
+ top_k_eager(input_; name=name, k=k, sorted=sorted)
+ else
+ top_k_graph(input_; name=name, k=k, sorted=sorted)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false)
+
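+Updates the resource variable `var` with the FTRL-proximal scheme, like
+`ResourceApplyFtrl` but with an extra `l2_shrinkage` term that applies L2
+shrinkage to the loss itself.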
+
+"""
+begin
+ begin
+ function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyFtrlV2") do
+ desc = tf.NodeDescription("ResourceApplyFtrlV2")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyFtrlV2")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, l2_shrinkage_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_shrinkage_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ else
+ resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ _nccl_broadcast_recv(shape)
+
+Replacement node for NcclBroadcast.
+"""
+begin
+ begin
+ function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "_NcclBroadcastRecv") do
+ desc = tf.NodeDescription("_NcclBroadcastRecv")
+ begin
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
+ desc = tf.EagerOp("_NcclBroadcastRecv")
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if num_devices !== nothing
+ desc["num_devices"] = Base.Int(num_devices)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name)
+ else
+ _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_is_closed(handle)
+
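+Returns a scalar boolean tensor that is true if the queue referenced by `handle`
+has been closed.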
+
+"""
+begin
+ begin
+ function queue_is_closed_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QueueIsClosed") do
+ desc = tf.NodeDescription("QueueIsClosed")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_is_closed_eager(handle_; name=nothing)
+ desc = tf.EagerOp("QueueIsClosed")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_is_closed, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed(handle_; name=nothing)
+ if tf.in_eager_mode()
+ queue_is_closed_eager(handle_; name=name)
+ else
+ queue_is_closed_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ shuffle_dataset(input_dataset, buffer_size, seed, seed2; reshuffle_each_iteration=true)
+
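+Creates a dataset that pseudo-randomly shuffles the elements of `input_dataset`
+using a buffer of `buffer_size` elements; `seed`/`seed2` fix the randomness and
+`reshuffle_each_iteration` controls whether each epoch gets a fresh ordering.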
+
+"""
+begin
+ begin
+ function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ShuffleDataset") do
+ desc = tf.NodeDescription("ShuffleDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ seed2_ = convert(Tensor{Int64}, seed2_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, seed2_)
+ end
+ end
+ begin
+ begin
+ if reshuffle_each_iteration !== nothing
+ desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ShuffleDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ seed2_ = convert(tf.EagerTensor, seed2_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ begin
+ tf.add_input(desc, seed2_)
+ end
+ end
+ begin
+ begin
+ if reshuffle_each_iteration !== nothing
+ desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes)
+ else
+ shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ deserialize_sparse(serialized_sparse)
+
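+Reconstructs the `SparseTensor` components `(indices, values, shape)`, with values
+of type `dtype`, from the serialized representation in `serialized_sparse`.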
+
+"""
+begin
+ begin
+ function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "DeserializeSparse") do
+ desc = tf.NodeDescription("DeserializeSparse")
+ begin
+ begin
+ serialized_sparse_ = convert(Tensor{String}, serialized_sparse_)
+ begin
+ end
+ end
+ begin
+ (serialized_sparse_,) = tf.tf_promote(serialized_sparse_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, serialized_sparse_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("DeserializeSparse")
+ serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_)
+ begin
+ begin
+ tf.add_input(desc, serialized_sparse_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tserialized"] = tf.data_type(serialized_sparse_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(deserialize_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype)
+ else
+ deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+    priority_queue_v2(; component_types=[], capacity=-1, container="", shared_name="")
+
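+Creates a queue handle whose elements are dequeued in priority order; the first
+component of each element must be a scalar int64 priority.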
+
+"""
+begin
+ begin
+ function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "PriorityQueueV2") do
+ desc = tf.NodeDescription("PriorityQueueV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function priority_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("PriorityQueueV2")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ _device_arg()
+
+A graph node which represents an argument to a function.
+"""
+begin
+ begin
+ function _device_arg_graph(; name=nothing, index=nothing)
+ local desc
+ tf.with_op_name(name, "_DeviceArg") do
+ desc = tf.NodeDescription("_DeviceArg")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _device_arg_eager(; name=nothing, index=nothing)
+ desc = tf.EagerOp("_DeviceArg")
+ begin
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_arg(; name=nothing, index=nothing)
+ if tf.in_eager_mode()
+ _device_arg_eager(; name=name, index=index)
+ else
+ _device_arg_graph(; name=name, index=index)
+ end
+ end
+ end
+end
+
+
+"""
+ truncated_normal(shape; seed=0, seed2=0)
+
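+Draws samples of the requested `dtype` (a required attribute) and `shape` from a
+normal distribution truncated to within two standard deviations of the mean.
+
+Illustrative only (assumes eager mode, access via the exported `Ops` module, and
+that a Julia type is accepted for the `dtype` attribute):
+
+    using TensorFlow
+    enable_eager_execution()
+    x = Ops.truncated_normal(constant(Int32[2, 3]), dtype=Float32)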
+
+"""
+begin
+ begin
+ function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TruncatedNormal") do
+ desc = tf.NodeDescription("TruncatedNormal")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ desc = tf.EagerOp("TruncatedNormal")
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ else
+ truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_forest_tree_predict(tree_handle, dense_features)
+
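+Routes each row of `dense_features` through the decision tree referenced by
+`tree_handle` and outputs per-example logits of width `logits_dimension`.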
+
+"""
+begin
+ begin
+ function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestTreePredict") do
+ desc = tf.NodeDescription("TensorForestTreePredict")
+ begin
+ begin
+ tree_handle_ = convert(Tensor{Any}, tree_handle_)
+ begin
+ end
+ end
+ begin
+ dense_features_ = convert(Tensor{Float32}, dense_features_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ begin
+ tf.add_input(desc, dense_features_)
+ end
+ end
+ begin
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
+ desc = tf.EagerOp("TensorForestTreePredict")
+ tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+ dense_features_ = convert(tf.EagerTensor, dense_features_)
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ begin
+ tf.add_input(desc, dense_features_)
+ end
+ end
+ begin
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension)
+ else
+ tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension)
+ end
+ end
+ end
+end
+
+
+"""
+    stack_v2(max_size; stack_name="")
+
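+Creates a new stack that can hold up to `max_size` elements of type `elem_type`
+(a required attribute) and returns its resource handle.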
+
+"""
+begin
+ begin
+ function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
+ local desc
+ tf.with_op_name(name, "StackV2") do
+ desc = tf.NodeDescription("StackV2")
+ begin
+ begin
+ max_size_ = convert(Tensor{Int32}, max_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, max_size_)
+ end
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ begin
+ if stack_name !== nothing
+ desc["stack_name"] = Base.String(stack_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
+ desc = tf.EagerOp("StackV2")
+ max_size_ = convert(tf.EagerTensor, max_size_)
+ begin
+ begin
+ tf.add_input(desc, max_size_)
+ end
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ begin
+ if stack_name !== nothing
+ desc["stack_name"] = Base.String(stack_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
+ if tf.in_eager_mode()
+ stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name)
+ else
+ stack_v2_graph(max_size_; name=name, elem_type=elem_type, stack_name=stack_name)
+ end
+ end
+ end
+end
+
+
+"""
+ accumulator_num_accumulated(handle)
+
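+Returns the number of gradients that have been aggregated in the conditional
+accumulator referenced by `handle`.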
+
+"""
+begin
+ begin
+ function accumulator_num_accumulated_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AccumulatorNumAccumulated") do
+ desc = tf.NodeDescription("AccumulatorNumAccumulated")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function accumulator_num_accumulated_eager(handle_; name=nothing)
+ desc = tf.EagerOp("AccumulatorNumAccumulated")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_num_accumulated(handle_; name=nothing)
+ if tf.in_eager_mode()
+ accumulator_num_accumulated_eager(handle_; name=name)
+ else
+ accumulator_num_accumulated_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_reset_v2(reader_handle)
+
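+Restores the reader referenced by `reader_handle` to its initial clean state.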
+
+"""
+begin
+ begin
+ function reader_reset_v2_graph(reader_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderResetV2") do
+ desc = tf.NodeDescription("ReaderResetV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_reset_v2_eager(reader_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderResetV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset_v2(reader_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_reset_v2_eager(reader_handle_; name=name)
+ else
+ reader_reset_v2_graph(reader_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad; use_locking=false)
+
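+Updates `var` with the AddSign optimizer rule: `m <- beta * m + (1 - beta) * grad`
+followed by `var <- var - lr * (alpha + sign_decay * sign(grad) * sign(m)) * grad`.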
+
+"""
+begin
+ begin
+ function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyAddSign") do
+ desc = tf.NodeDescription("ApplyAddSign")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ sign_decay_ = convert(Tensor{Any}, sign_decay_)
+ begin
+ end
+ end
+ begin
+ beta_ = convert(Tensor{Any}, beta_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyAddSign")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ sign_decay_ = convert(tf.EagerTensor, sign_decay_)
+ beta_ = convert(tf.EagerTensor, beta_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(m_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(sign_decay_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+    retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ rint(x)
+
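+Rounds the elements of `x` to the nearest integer value in floating point,
+rounding half-way cases to the nearest even integer (banker's rounding).
+
+For instance (a hypothetical eager-mode call through the exported `Ops` module):
+
+    using TensorFlow
+    enable_eager_execution()
+    Ops.rint(constant([0.5, 1.5, 2.4]))  # ≈ [0.0, 2.0, 2.0]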
+
+"""
+begin
+ begin
+ function rint_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Rint") do
+ desc = tf.NodeDescription("Rint")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rint_eager(x_; name=nothing)
+ desc = tf.EagerOp("Rint")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rint, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rint(x_; name=nothing)
+ if tf.in_eager_mode()
+ rint_eager(x_; name=name)
+ else
+ rint_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true)
+
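+Extracts `size`-shaped windows (glimpses) from `input` at the locations given by
+`offsets`, one per batch element; pixels falling outside the image are filled with
+noise, and `centered`/`normalized` control how the offsets are interpreted.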
+
+"""
+begin
+ begin
+ function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing)
+ local desc
+ tf.with_op_name(name, "ExtractGlimpse") do
+ desc = tf.NodeDescription("ExtractGlimpse")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ offsets_ = convert(Tensor{Float32}, offsets_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, offsets_)
+ end
+ end
+ begin
+ begin
+ if centered !== nothing
+ desc["centered"] = Base.Bool(centered)
+ end
+ end
+ begin
+ if normalized !== nothing
+ desc["normalized"] = Base.Bool(normalized)
+ end
+ end
+ begin
+ if uniform_noise !== nothing
+ desc["uniform_noise"] = Base.Bool(uniform_noise)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing)
+ desc = tf.EagerOp("ExtractGlimpse")
+ input_ = convert(tf.EagerTensor, input_)
+ size_ = convert(tf.EagerTensor, size_)
+ offsets_ = convert(tf.EagerTensor, offsets_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, offsets_)
+ end
+ end
+ begin
+ begin
+ if centered !== nothing
+ desc["centered"] = Base.Bool(centered)
+ end
+ end
+ begin
+ if normalized !== nothing
+ desc["normalized"] = Base.Bool(normalized)
+ end
+ end
+ begin
+ if uniform_noise !== nothing
+ desc["uniform_noise"] = Base.Bool(uniform_noise)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing)
+ if tf.in_eager_mode()
+ extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise)
+ else
+ extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise)
+ end
+ end
+ end
+end
+
+
+"""
+ string_to_hash_bucket_strong(input)
+
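+Hashes each input string into `[0, num_buckets)` with a keyed, cryptographically
+strong hash; `key` (a pair of integers) seeds the hash so that the mapping is hard
+to invert or to force collisions on.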
+
+"""
+begin
+ begin
+ function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing)
+ local desc
+ tf.with_op_name(name, "StringToHashBucketStrong") do
+ desc = tf.NodeDescription("StringToHashBucketStrong")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ begin
+ if key !== nothing
+ desc["key"] = map(Base.identity, key)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing)
+ desc = tf.EagerOp("StringToHashBucketStrong")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ begin
+ if key !== nothing
+ desc["key"] = map(Base.identity, key)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing)
+ if tf.in_eager_mode()
+ string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key)
+ else
+ string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key)
+ end
+ end
+ end
+end
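+
+# A minimal usage sketch (assuming the same `Ops`/`sess` setup as above): hash each
+# string into one of 100 buckets with a caller-chosen 2-element key; the keyed hash
+# makes the bucket assignment hard to reverse without `key`.
+#
+#     s = constant(["hello", "world"])
+#     run(sess, Ops.string_to_hash_bucket_strong(s; num_buckets=100, key=[1234, 5678]))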
+
+
+"""
+ one_shot_iterator(; container="", shared_name="")
+
+Makes a "one-shot" iterator that can only be iterated once.
+"""
+begin
+ begin
+ function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OneShotIterator") do
+ desc = tf.NodeDescription("OneShotIterator")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dataset_factory !== nothing
+ desc["dataset_factory"] = Base.identity(dataset_factory)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function one_shot_iterator_eager(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OneShotIterator")
+ begin
+ end
+ begin
+ begin
+ if dataset_factory !== nothing
+ desc["dataset_factory"] = Base.identity(dataset_factory)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name)
+ else
+ one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_momentum(var, accum, lr, grad, indices, momentum; use_locking=false, use_nesterov=false)
+
+Updates relevant entries in `var` and `accum` according to the momentum scheme.
+"""
+begin
+ begin
+ function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyMomentum") do
+ desc = tf.NodeDescription("ResourceSparseApplyMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
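+ # Shift the caller's 1-based indices to TensorFlow's 0-based convention.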
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
+ end
+end
+
+
+"""
+ save_slices(filename, tensor_names, shapes_and_slices, data)
+
+Saves slices of the input tensors to disk.
+"""
+begin
+ begin
+ function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
+ local desc
+ tf.with_op_name(name, "SaveSlices") do
+ desc = tf.NodeDescription("SaveSlices")
+ begin
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ begin
+ tensor_names_ = convert(Tensor{String}, tensor_names_)
+ begin
+ end
+ end
+ begin
+ shapes_and_slices_ = convert(Tensor{String}, shapes_and_slices_)
+ begin
+ end
+ end
+ begin
+ data_ = [convert(Tensor{Any}, x) for x = data_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, shapes_and_slices_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
+ desc = tf.EagerOp("SaveSlices")
+ filename_ = convert(tf.EagerTensor, filename_)
+ tensor_names_ = convert(tf.EagerTensor, tensor_names_)
+ shapes_and_slices_ = convert(tf.EagerTensor, shapes_and_slices_)
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, tensor_names_)
+ end
+ begin
+ tf.add_input(desc, shapes_and_slices_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
+ if tf.in_eager_mode()
+ save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T)
+ else
+ save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_dataset_cardinality(input_dataset)
+
+Returns the cardinality of `input_dataset`.
+"""
+begin
+ begin
+ function experimental_dataset_cardinality_graph(input_dataset_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalDatasetCardinality") do
+ desc = tf.NodeDescription("ExperimentalDatasetCardinality")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_dataset_cardinality_eager(input_dataset_; name=nothing)
+ desc = tf.EagerOp("ExperimentalDatasetCardinality")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_cardinality(input_dataset_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_dataset_cardinality_eager(input_dataset_; name=name)
+ else
+ experimental_dataset_cardinality_graph(input_dataset_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_numa_map_and_batch_dataset(input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder; preserve_cardinality=false)
+
+Creates a dataset that fuses mapping with batching (NUMA-aware variant).
+"""
+begin
+ begin
+ function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do
+ desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ begin
+ num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_)
+ begin
+ end
+ end
+ begin
+ drop_remainder_ = convert(Tensor{Bool}, drop_remainder_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_)
+ drop_remainder_ = convert(tf.EagerTensor, drop_remainder_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, num_parallel_calls_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if preserve_cardinality !== nothing
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+ if tf.in_eager_mode()
+ experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality)
+ else
+ experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality)
+ end
+ end
+ end
+end
+
+
+"""
+ is_finite(x)
+
+Returns which elements of `x` are finite.
+"""
+begin
+ begin
+ function is_finite_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IsFinite") do
+ desc = tf.NodeDescription("IsFinite")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function is_finite_eager(x_; name=nothing)
+ desc = tf.EagerOp("IsFinite")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(is_finite, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_finite(x_; name=nothing)
+ if tf.in_eager_mode()
+ is_finite_eager(x_; name=name)
+ else
+ is_finite_graph(x_; name=name)
+ end
+ end
+ end
+end
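+
+# A minimal usage sketch (assuming `Ops`/`sess` as above): elementwise finiteness test.
+#
+#     x = constant([1.0, Inf, NaN])
+#     run(sess, Ops.is_finite(x))  # => Bool[true, false, false]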
+
+
+"""
+ all_to_all(input, group_assignment)
+
+An Op to exchange data across TPU replicas. On each replica, the input is split into blocks along `split_dimension`, exchanged with the other replicas in its `group_assignment` group, and the received blocks are concatenated along `concat_dimension`.
+"""
+begin
+ begin
+ function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing)
+ local desc
+ tf.with_op_name(name, "AllToAll") do
+ desc = tf.NodeDescription("AllToAll")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ group_assignment_ = convert(Tensor{Int32}, group_assignment_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, group_assignment_)
+ end
+ end
+ begin
+ begin
+ if concat_dimension !== nothing
+ desc["concat_dimension"] = Base.Int(concat_dimension)
+ end
+ end
+ begin
+ if split_dimension !== nothing
+ desc["split_dimension"] = Base.Int(split_dimension)
+ end
+ end
+ begin
+ if split_count !== nothing
+ desc["split_count"] = Base.Int(split_count)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing)
+ desc = tf.EagerOp("AllToAll")
+ input_ = convert(tf.EagerTensor, input_)
+ group_assignment_ = convert(tf.EagerTensor, group_assignment_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, group_assignment_)
+ end
+ end
+ begin
+ begin
+ if concat_dimension !== nothing
+ desc["concat_dimension"] = Base.Int(concat_dimension)
+ end
+ end
+ begin
+ if split_dimension !== nothing
+ desc["split_dimension"] = Base.Int(split_dimension)
+ end
+ end
+ begin
+ if split_count !== nothing
+ desc["split_count"] = Base.Int(split_count)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing)
+ if tf.in_eager_mode()
+ all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count)
+ else
+ all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count)
+ end
+ end
+ end
+end
+
+
+"""
+ take_many_sparse_from_tensors_map(sparse_handles; container="", shared_name="")
+
+Reads `SparseTensors` from a `SparseTensorsMap` and concatenates them into a single `SparseTensor`.
+"""
+begin
+ begin
+ function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "TakeManySparseFromTensorsMap") do
+ desc = tf.NodeDescription("TakeManySparseFromTensorsMap")
+ begin
+ begin
+ sparse_handles_ = convert(Tensor{Int64}, sparse_handles_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_handles_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function take_many_sparse_from_tensors_map_eager(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("TakeManySparseFromTensorsMap")
+ sparse_handles_ = convert(tf.EagerTensor, sparse_handles_)
+ begin
+ begin
+ tf.add_input(desc, sparse_handles_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name)
+ else
+ take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_matrix_diag_part(input)
+
+Returns the batched diagonal part of a batched tensor (deprecated in favor of `MatrixDiagPart`).
+"""
+begin
+ begin
+ function batch_matrix_diag_part_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixDiagPart") do
+ desc = tf.NodeDescription("BatchMatrixDiagPart")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_diag_part_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchMatrixDiagPart")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag_part(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_diag_part_eager(input_; name=name)
+ else
+ batch_matrix_diag_part_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ fixed_length_record_dataset(filenames, header_bytes, record_bytes, footer_bytes, buffer_size)
+
+Creates a dataset that emits the fixed-length records from one or more binary files.
+"""
+begin
+ begin
+ function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FixedLengthRecordDataset") do
+ desc = tf.NodeDescription("FixedLengthRecordDataset")
+ begin
+ begin
+ filenames_ = convert(Tensor{String}, filenames_)
+ begin
+ end
+ end
+ begin
+ header_bytes_ = convert(Tensor{Int64}, header_bytes_)
+ begin
+ end
+ end
+ begin
+ record_bytes_ = convert(Tensor{Int64}, record_bytes_)
+ begin
+ end
+ end
+ begin
+ footer_bytes_ = convert(Tensor{Int64}, footer_bytes_)
+ begin
+ end
+ end
+ begin
+ buffer_size_ = convert(Tensor{Int64}, buffer_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, header_bytes_)
+ end
+ begin
+ tf.add_input(desc, record_bytes_)
+ end
+ begin
+ tf.add_input(desc, footer_bytes_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing)
+ desc = tf.EagerOp("FixedLengthRecordDataset")
+ filenames_ = convert(tf.EagerTensor, filenames_)
+ header_bytes_ = convert(tf.EagerTensor, header_bytes_)
+ record_bytes_ = convert(tf.EagerTensor, record_bytes_)
+ footer_bytes_ = convert(tf.EagerTensor, footer_bytes_)
+ buffer_size_ = convert(tf.EagerTensor, buffer_size_)
+ begin
+ begin
+ tf.add_input(desc, filenames_)
+ end
+ begin
+ tf.add_input(desc, header_bytes_)
+ end
+ begin
+ tf.add_input(desc, record_bytes_)
+ end
+ begin
+ tf.add_input(desc, footer_bytes_)
+ end
+ begin
+ tf.add_input(desc, buffer_size_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing)
+ if tf.in_eager_mode()
+ fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name)
+ else
+ fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name)
+ end
+ end
+ end
+end
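+
+# A minimal usage sketch (assuming `Ops`/`sess` as above; "data.bin" is a hypothetical
+# file): emit 16-byte records, skipping an 8-byte header and no footer, with a 1 MiB
+# read buffer. All byte counts are Int64 scalars.
+#
+#     ds = Ops.fixed_length_record_dataset(["data.bin"], 8, 16, 0, 1 << 20)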
+
+
+"""
+ stack_push(handle, elem; swap_memory=false)
+
+Pushes an element onto the stack.
+"""
+begin
+ begin
+ function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing)
+ local desc
+ tf.with_op_name(name, "StackPush") do
+ desc = tf.NodeDescription("StackPush")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ elem_ = convert(Tensor{Any}, elem_)
+ begin
+ end
+ end
+ begin
+ (elem_,) = tf.tf_promote(elem_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, elem_)
+ end
+ end
+ begin
+ begin
+ if swap_memory !== nothing
+ desc["swap_memory"] = Base.Bool(swap_memory)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing)
+ desc = tf.EagerOp("StackPush")
+ handle_ = convert(tf.EagerTensor, handle_)
+ elem_ = convert(tf.EagerTensor, elem_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, elem_)
+ end
+ end
+ begin
+ begin
+ if swap_memory !== nothing
+ desc["swap_memory"] = Base.Bool(swap_memory)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(elem_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push(handle_, elem_; name=nothing, swap_memory=nothing)
+ if tf.in_eager_mode()
+ stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory)
+ else
+ stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory)
+ end
+ end
+ end
+end
+
+
+"""
+ placeholder_v2()
+
+A placeholder op for a value that will be fed into the computation.
+"""
+begin
+ begin
+ function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "PlaceholderV2") do
+ desc = tf.NodeDescription("PlaceholderV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function placeholder_v2_eager(; name=nothing, dtype=nothing, shape=nothing)
+ desc = tf.EagerOp("PlaceholderV2")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ placeholder_v2_eager(; name=name, dtype=dtype, shape=shape)
+ else
+ placeholder_v2_graph(; name=name, dtype=dtype, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ multi_device_iterator_init(dataset, multi_device_iterator, max_buffer_size)
+
+Initializes the multi-device iterator with the given dataset.
+"""
+begin
+ begin
+ function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MultiDeviceIteratorInit") do
+ desc = tf.NodeDescription("MultiDeviceIteratorInit")
+ begin
+ begin
+ dataset_ = convert(Tensor{Any}, dataset_)
+ begin
+ end
+ end
+ begin
+ multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_)
+ begin
+ end
+ end
+ begin
+ max_buffer_size_ = convert(Tensor{Int64}, max_buffer_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ begin
+ tf.add_input(desc, multi_device_iterator_)
+ end
+ begin
+ tf.add_input(desc, max_buffer_size_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing)
+ desc = tf.EagerOp("MultiDeviceIteratorInit")
+ dataset_ = convert(tf.EagerTensor, dataset_)
+ multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_)
+ max_buffer_size_ = convert(tf.EagerTensor, max_buffer_size_)
+ begin
+ begin
+ tf.add_input(desc, dataset_)
+ end
+ begin
+ tf.add_input(desc, multi_device_iterator_)
+ end
+ begin
+ tf.add_input(desc, max_buffer_size_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, max_buffer_size_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing)
+ if tf.in_eager_mode()
+ multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name)
+ else
+ multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ gcs_configure_block_cache(max_cache_size, block_size, max_staleness)
+
+Re-configures the GCS block cache with the new configuration values.
+"""
+begin
+ begin
+ function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GcsConfigureBlockCache") do
+ desc = tf.NodeDescription("GcsConfigureBlockCache")
+ begin
+ begin
+ max_cache_size_ = convert(Tensor{Any}, max_cache_size_)
+ begin
+ end
+ end
+ begin
+ block_size_ = convert(Tensor{Any}, block_size_)
+ begin
+ end
+ end
+ begin
+ max_staleness_ = convert(Tensor{Any}, max_staleness_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, max_cache_size_)
+ end
+ begin
+ tf.add_input(desc, block_size_)
+ end
+ begin
+ tf.add_input(desc, max_staleness_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing)
+ desc = tf.EagerOp("GcsConfigureBlockCache")
+ max_cache_size_ = convert(tf.EagerTensor, max_cache_size_)
+ block_size_ = convert(tf.EagerTensor, block_size_)
+ max_staleness_ = convert(tf.EagerTensor, max_staleness_)
+ begin
+ begin
+ tf.add_input(desc, max_cache_size_)
+ end
+ begin
+ tf.add_input(desc, block_size_)
+ end
+ begin
+ tf.add_input(desc, max_staleness_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing)
+ if tf.in_eager_mode()
+ gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name)
+ else
+ gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_dequeue_v2(handle; timeout_ms=-1)
+
+Dequeues a tuple of one or more tensors from the given queue.
+"""
+begin
+ begin
+ function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueDequeueV2") do
+ desc = tf.NodeDescription("QueueDequeueV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueDequeueV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ else
+ queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ transpose(x, perm)
+
+Shuffles the dimensions of `x` according to the permutation `perm`.
+"""
+begin
+ begin
+ function transpose_graph(x_, perm_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Transpose") do
+ desc = tf.NodeDescription("Transpose")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ perm_ = convert(Tensor{Int32}, perm_)
+ begin
+ end
+ end
+ begin
+ (perm_,) = tf.tf_promote(perm_)
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, perm_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function transpose_eager(x_, perm_; name=nothing)
+ desc = tf.EagerOp("Transpose")
+ x_ = convert(tf.EagerTensor, x_)
+ perm_ = convert(tf.EagerTensor, perm_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, perm_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["Tperm"] = tf.data_type(perm_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function transpose(x_, perm_; name=nothing)
+ if tf.in_eager_mode()
+ transpose_eager(x_, perm_; name=name)
+ else
+ transpose_graph(x_, perm_; name=name)
+ end
+ end
+ end
+end
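+
+# A minimal usage sketch (assuming `Ops`/`sess` as above). Note this raw wrapper passes
+# `perm` straight through to the kernel, so the permutation is 0-based:
+#
+#     x = constant([1 2 3; 4 5 6])              # 2x3
+#     run(sess, Ops.transpose(x, Int32[1, 0]))  # => 3x2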
+
+
+"""
+ retrieve_tpu_embedding_rms_prop_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ ifft(input)
+
+Computes the inverse 1-D discrete Fourier transform over the innermost dimension of `input`.
+"""
+begin
+ begin
+ function ifft_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IFFT") do
+ desc = tf.NodeDescription("IFFT")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ifft_eager(input_; name=nothing)
+ desc = tf.EagerOp("IFFT")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tcomplex"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ifft, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft(input_; name=nothing)
+ if tf.in_eager_mode()
+ ifft_eager(input_; name=name)
+ else
+ ifft_graph(input_; name=name)
+ end
+ end
+ end
+end
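+
+# A minimal usage sketch (assuming `Ops`/`sess` as above, plus the companion `Ops.fft`
+# wrapper): the inverse transform over the innermost dimension, so `ifft(fft(x)) ≈ x`.
+#
+#     x = constant(ComplexF32[1, 2, 3, 4])
+#     run(sess, Ops.ifft(Ops.fft(x)))  # ≈ [1, 2, 3, 4]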
+
+
+"""
+ sparse_segment_sum_with_num_segments(data, indices, segment_ids, num_segments)
+
+Computes the sum along sparse segments of a tensor, with the number of segments given explicitly by `num_segments`.
+"""
+begin
+ begin
+ function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do
+ desc = tf.NodeDescription("SparseSegmentSumWithNumSegments")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
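+ # Shift the caller's 1-based indices to TensorFlow's 0-based convention.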
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentSumWithNumSegments")
+ data_ = convert(tf.EagerTensor, data_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name)
+ else
+ sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_is_closed_v2(handle)
+
+Returns true if the given queue is closed.
+"""
+begin
+ begin
+ function queue_is_closed_v2_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "QueueIsClosedV2") do
+ desc = tf.NodeDescription("QueueIsClosedV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_is_closed_v2_eager(handle_; name=nothing)
+ desc = tf.EagerOp("QueueIsClosedV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed_v2(handle_; name=nothing)
+ if tf.in_eager_mode()
+ queue_is_closed_v2_eager(handle_; name=name)
+ else
+ queue_is_closed_v2_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ parameterized_truncated_normal(shape, means, stdevs, minvals, maxvals; seed=0, seed2=0)
+
+Outputs random values from a normal distribution with per-batch parameters, truncated to `[minvals, maxvals]`.
+"""
+begin
+ begin
+ function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ParameterizedTruncatedNormal") do
+ desc = tf.NodeDescription("ParameterizedTruncatedNormal")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ means_ = convert(Tensor{Any}, means_)
+ begin
+ end
+ end
+ begin
+ stdevs_ = convert(Tensor{Any}, stdevs_)
+ begin
+ end
+ end
+ begin
+ minvals_ = convert(Tensor{Any}, minvals_)
+ begin
+ end
+ end
+ begin
+ maxvals_ = convert(Tensor{Any}, maxvals_)
+ begin
+ end
+ end
+ begin
+ (means_, stdevs_, minvals_, maxvals_) = tf.tf_promote(means_, stdevs_, minvals_, maxvals_)
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, means_)
+ end
+ begin
+ tf.add_input(desc, stdevs_)
+ end
+ begin
+ tf.add_input(desc, minvals_)
+ end
+ begin
+ tf.add_input(desc, maxvals_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ desc = tf.EagerOp("ParameterizedTruncatedNormal")
+ shape_ = convert(tf.EagerTensor, shape_)
+ means_ = convert(tf.EagerTensor, means_)
+ stdevs_ = convert(tf.EagerTensor, stdevs_)
+ minvals_ = convert(tf.EagerTensor, minvals_)
+ maxvals_ = convert(tf.EagerTensor, maxvals_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, means_)
+ end
+ begin
+ tf.add_input(desc, stdevs_)
+ end
+ begin
+ tf.add_input(desc, minvals_)
+ end
+ begin
+ tf.add_input(desc, maxvals_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(means_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(stdevs_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(minvals_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(maxvals_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ else
+ parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ end
+ end
+ end
+end
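+
+# A minimal usage sketch (assuming `Ops`/`sess` as above): draw a 2x3 sample where each
+# of the 2 batch rows has its own mean/stddev and truncation bounds.
+#
+#     y = Ops.parameterized_truncated_normal(Int32[2, 3],
+#                                            [0.0, 1.0],   # means
+#                                            [1.0, 0.5],   # stdevs
+#                                            [-2.0, 0.0],  # minvals
+#                                            [2.0, 2.0])   # maxvals
+#     run(sess, y)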
+
+
+"""
+ diag_part(input)
+
+Returns the diagonal part of the input tensor.
+"""
+begin
+ begin
+ function diag_part_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DiagPart") do
+ desc = tf.NodeDescription("DiagPart")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function diag_part_eager(input_; name=nothing)
+ desc = tf.EagerOp("DiagPart")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(diag_part, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag_part(input_; name=nothing)
+ if tf.in_eager_mode()
+ diag_part_eager(input_; name=name)
+ else
+ diag_part_graph(input_; name=name)
+ end
+ end
+ end
+end
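+
+# A minimal usage sketch (assuming `Ops`/`sess` as above): `diag_part` pulls the main
+# diagonal back out of a tensor such as one built by `diag`.
+#
+#     m = constant([1 0; 0 2])
+#     run(sess, Ops.diag_part(m))  # => [1, 2]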
+
+
+"""
+ regex_replace(input, pattern, rewrite; replace_global=true)
+
+Replaces matches of the `pattern` regular expression in `input` with the `rewrite` string.
+"""
+begin
+ begin
+ function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
+ local desc
+ tf.with_op_name(name, "RegexReplace") do
+ desc = tf.NodeDescription("RegexReplace")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ begin
+ pattern_ = convert(Tensor{String}, pattern_)
+ begin
+ end
+ end
+ begin
+ rewrite_ = convert(Tensor{String}, rewrite_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, pattern_)
+ end
+ begin
+ tf.add_input(desc, rewrite_)
+ end
+ end
+ begin
+ begin
+ if replace_global !== nothing
+ desc["replace_global"] = Base.Bool(replace_global)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
+ desc = tf.EagerOp("RegexReplace")
+ input_ = convert(tf.EagerTensor, input_)
+ pattern_ = convert(tf.EagerTensor, pattern_)
+ rewrite_ = convert(tf.EagerTensor, rewrite_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, pattern_)
+ end
+ begin
+ tf.add_input(desc, rewrite_)
+ end
+ end
+ begin
+ begin
+ if replace_global !== nothing
+ desc["replace_global"] = Base.Bool(replace_global)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(regex_replace, [input_, pattern_, rewrite_], name=nothing, replace_global=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
+ if tf.in_eager_mode()
+ regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global)
+ else
+ regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global)
+ end
+ end
+ end
+end
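+
+# A minimal usage sketch (assuming `Ops`/`sess` as above): rewrite every match of an
+# RE2 pattern; with replace_global=false only the first match in each string changes.
+#
+#     s = constant(["cat bat", "hat"])
+#     run(sess, Ops.regex_replace(s, "at", "ow"))  # => ["cow bow", "how"]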
+
+
+"""
+ sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, b; adjoint_a=false, adjoint_b=false)
+
+Multiplies a rank-2 `SparseTensor` by a dense matrix.
+"""
+begin
+ begin
+ function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
+ local desc
+ tf.with_op_name(name, "SparseTensorDenseMatMul") do
+ desc = tf.NodeDescription("SparseTensorDenseMatMul")
+ begin
+ begin
+ a_indices_ = convert(Tensor{Int64}, a_indices_)
+ begin
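+ # Shift the caller's 1-based sparse coordinates to TensorFlow's 0-based convention.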
+ a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1)
+ end
+ end
+ begin
+ a_values_ = convert(Tensor{Any}, a_values_)
+ begin
+ end
+ end
+ begin
+ a_shape_ = convert(Tensor{Int64}, a_shape_)
+ begin
+ end
+ end
+ begin
+ b_ = convert(Tensor{Any}, b_)
+ begin
+ end
+ end
+ begin
+ (a_values_, b_) = tf.tf_promote(a_values_, b_)
+ end
+ begin
+ (a_indices_,) = tf.tf_promote(a_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ begin
+ if adjoint_a !== nothing
+ desc["adjoint_a"] = Base.Bool(adjoint_a)
+ end
+ end
+ begin
+ if adjoint_b !== nothing
+ desc["adjoint_b"] = Base.Bool(adjoint_b)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
+ desc = tf.EagerOp("SparseTensorDenseMatMul")
+ a_indices_ = convert(tf.EagerTensor, a_indices_)
+ a_values_ = convert(tf.EagerTensor, a_values_)
+ a_shape_ = convert(tf.EagerTensor, a_shape_)
+ b_ = convert(tf.EagerTensor, b_)
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ begin
+ if adjoint_a !== nothing
+ desc["adjoint_a"] = Base.Bool(adjoint_a)
+ end
+ end
+ begin
+ if adjoint_b !== nothing
+ desc["adjoint_b"] = Base.Bool(adjoint_b)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(a_indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(a_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
+ if tf.in_eager_mode()
+ sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+ else
+ sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+ end
+ end
+ end
+end
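+
+# A minimal usage sketch (assuming `Ops`/`sess` as above): multiply a 2x2 sparse matrix
+# with a single nonzero (3.0 at row 1, column 1, 1-based; the wrapper shifts the
+# coordinates to 0-based internally, as noted above) by a dense 2x2 matrix.
+#
+#     b = constant(ones(2, 2))
+#     y = Ops.sparse_tensor_dense_mat_mul([1 1], [3.0], Int64[2, 2], b)
+#     run(sess, y)  # => [3.0 3.0; 0.0 0.0]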
+
+
+"""
+ map_defun(arguments, captured_inputs; Tcaptured=Int64[])
+
+Maps a function on the list of tensors unpacked from `arguments` along dimension 0.
+"""
+begin
+ begin
+ function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
+ local desc
+ tf.with_op_name(name, "MapDefun") do
+ desc = tf.NodeDescription("MapDefun")
+ begin
+ begin
+ arguments_ = [convert(Tensor{Any}, x) for x = arguments_]
+ begin
+ end
+ end
+ begin
+ captured_inputs_ = [convert(Tensor{Any}, x) for x = captured_inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, arguments_)
+ end
+ begin
+ tf.add_input(desc, captured_inputs_)
+ end
+ end
+ begin
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if Tcaptured !== nothing
+ desc["Tcaptured"] = map(Base.identity, Tcaptured)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
+ desc = tf.EagerOp("MapDefun")
+ arguments_ = convert(tf.EagerTensor, arguments_)
+ captured_inputs_ = convert(tf.EagerTensor, captured_inputs_)
+ begin
+ begin
+ tf.add_input(desc, arguments_)
+ end
+ begin
+ tf.add_input(desc, captured_inputs_)
+ end
+ end
+ begin
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if Tcaptured !== nothing
+ desc["Tcaptured"] = map(Base.identity, Tcaptured)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
+ if tf.in_eager_mode()
+ map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f)
+ else
+ map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f)
+ end
+ end
+ end
+end
+
+
+"""
+ thread_unsafe_unigram_candidate_sampler(true_classes; seed=0, seed2=0)
+
+Generates labels for candidate sampling with a learned unigram distribution.
+"""
+begin
+ begin
+ function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do
+ desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler")
+ begin
+ begin
+ true_classes_ = convert(Tensor{Int64}, true_classes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler")
+ true_classes_ = convert(tf.EagerTensor, true_classes_)
+ begin
+ begin
+ tf.add_input(desc, true_classes_)
+ end
+ end
+ begin
+ begin
+ if num_true !== nothing
+ desc["num_true"] = Base.Int(num_true)
+ end
+ end
+ begin
+ if num_sampled !== nothing
+ desc["num_sampled"] = Base.Int(num_sampled)
+ end
+ end
+ begin
+ if unique !== nothing
+ desc["unique"] = Base.Bool(unique)
+ end
+ end
+ begin
+ if range_max !== nothing
+ desc["range_max"] = Base.Int(range_max)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ else
+ thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ parallel_concat(values)
+
+
+"""
+begin
+ begin
+ function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "ParallelConcat") do
+ desc = tf.NodeDescription("ParallelConcat")
+ begin
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing)
+ desc = tf.EagerOp("ParallelConcat")
+ values_ = [convert(tf.EagerTensor, x) for x = values_]
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_concat(values_; name=nothing, N=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ parallel_concat_eager(values_; name=name, N=N, shape=shape)
+ else
+ parallel_concat_graph(values_; name=name, N=N, shape=shape)
+ end
+ end
+ end
+end
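+
+# Usage sketch (illustrative, not generator output): ParallelConcat stacks N
+# tensors, each with size 1 in the first dimension, into one tensor whose full
+# shape must be supplied via the `shape` attribute:
+#
+#   a = constant([1 2]); b = constant([3 4])    # each 1x2
+#   parallel_concat([a, b]; shape=[2, 2])       # -> 2x2 with rows [1 2], [3 4]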
+
+
+"""
+ lookup_table_find_v2(table_handle, keys, default_value)
+
+
+"""
+begin
+ begin
+ function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableFindV2") do
+ desc = tf.NodeDescription("LookupTableFindV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ default_value_ = convert(Tensor{Any}, default_value_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ begin
+ (default_value_,) = tf.tf_promote(default_value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing)
+ desc = tf.EagerOp("LookupTableFindV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ default_value_ = convert(tf.EagerTensor, default_value_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(default_value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_find_v2, [table_handle_, keys_, default_value_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=name)
+ else
+ lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=name)
+ end
+ end
+ end
+end
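+
+# Usage sketch (illustrative): looks `keys` up in the table behind
+# `table_handle`, substituting `default_value` for missing keys. The handle
+# would come from a table-construction op (e.g. a HashTableV2 wrapper, assumed
+# to be generated elsewhere in this file):
+#
+#   vals = lookup_table_find_v2(table_handle, constant(["a", "b"]), constant(0))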
+
+
+"""
+ tensor_forest_tree_deserialize(tree_handle, tree_config)
+
+
+"""
+begin
+ begin
+ function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestTreeDeserialize") do
+ desc = tf.NodeDescription("TensorForestTreeDeserialize")
+ begin
+ begin
+ tree_handle_ = convert(Tensor{Any}, tree_handle_)
+ begin
+ end
+ end
+ begin
+ tree_config_ = convert(Tensor{String}, tree_config_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ begin
+ tf.add_input(desc, tree_config_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing)
+ desc = tf.EagerOp("TensorForestTreeDeserialize")
+ tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+ tree_config_ = convert(tf.EagerTensor, tree_config_)
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ begin
+ tf.add_input(desc, tree_config_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name)
+ else
+ tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ retrieve_tpu_embedding_momentum_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_momentum_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ fake_quant_with_min_max_args(inputs; min=-6.0, max=6.0, num_bits=8, narrow_range=false)
+
+
+"""
+begin
+ begin
+ function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do
+ desc = tf.NodeDescription("FakeQuantWithMinMaxArgs")
+ begin
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if min !== nothing
+ desc["min"] = Base.identity(min)
+ end
+ end
+ begin
+ if max !== nothing
+ desc["max"] = Base.identity(max)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+ desc = tf.EagerOp("FakeQuantWithMinMaxArgs")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if min !== nothing
+ desc["min"] = Base.identity(min)
+ end
+ end
+ begin
+ if max !== nothing
+ desc["max"] = Base.identity(max)
+ end
+ end
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+ if tf.in_eager_mode()
+ fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+ else
+ fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+ end
+ end
+ end
+end
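+
+# Usage sketch (illustrative): simulates quantization of `inputs` into the
+# range [min, max] with `num_bits` of precision; values are clamped and
+# snapped to the quantization grid but remain Float32:
+#
+#   x = constant([-10.0, 0.0, 10.0])
+#   fake_quant_with_min_max_args(x; min=-6.0, max=6.0, num_bits=8)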
+
+
+"""
+ resource_apply_gradient_descent(var, alpha, delta; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyGradientDescent") do
+ desc = tf.NodeDescription("ResourceApplyGradientDescent")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Any}, delta_)
+ begin
+ end
+ end
+ begin
+ (alpha_, delta_) = tf.tf_promote(alpha_, delta_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyGradientDescent")
+ var_ = convert(tf.EagerTensor, var_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(delta_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking)
+ else
+ resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
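+
+# Usage sketch (illustrative): applies the plain gradient-descent update
+# `var -= alpha * delta` in place on a resource variable:
+#
+#   resource_apply_gradient_descent(var_handle, constant(0.01), grad)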
+
+
+"""
+ experimental_sliding_window_dataset(input_dataset, window_size, window_shift, window_stride)
+
+
+"""
+begin
+ begin
+ function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do
+ desc = tf.NodeDescription("ExperimentalSlidingWindowDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ window_size_ = convert(Tensor{Int64}, window_size_)
+ begin
+ end
+ end
+ begin
+ window_shift_ = convert(Tensor{Int64}, window_shift_)
+ begin
+ end
+ end
+ begin
+ window_stride_ = convert(Tensor{Int64}, window_stride_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, window_size_)
+ end
+ begin
+ tf.add_input(desc, window_shift_)
+ end
+ begin
+ tf.add_input(desc, window_stride_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalSlidingWindowDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ window_size_ = convert(tf.EagerTensor, window_size_)
+ window_shift_ = convert(tf.EagerTensor, window_shift_)
+ window_stride_ = convert(tf.EagerTensor, window_stride_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, window_size_)
+ end
+ begin
+ tf.add_input(desc, window_shift_)
+ end
+ begin
+ tf.add_input(desc, window_stride_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ decode_raw(bytes; little_endian=true)
+
+
+"""
+begin
+ begin
+ function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
+ local desc
+ tf.with_op_name(name, "DecodeRaw") do
+ desc = tf.NodeDescription("DecodeRaw")
+ begin
+ begin
+ bytes_ = convert(Tensor{String}, bytes_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, bytes_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if little_endian !== nothing
+ desc["little_endian"] = Base.Bool(little_endian)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
+ desc = tf.EagerOp("DecodeRaw")
+ bytes_ = convert(tf.EagerTensor, bytes_)
+ begin
+ begin
+ tf.add_input(desc, bytes_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if little_endian !== nothing
+ desc["little_endian"] = Base.Bool(little_endian)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
+ if tf.in_eager_mode()
+ decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian)
+ else
+ decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian)
+ end
+ end
+ end
+end
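+
+# Usage sketch (illustrative): reinterprets the raw bytes of each input string
+# as fixed-width numbers of type `out_type`:
+#
+#   raw = constant("\x00\x00\x80\x3f")   # little-endian bytes of Float32 1.0
+#   decode_raw(raw; out_type=Float32)    # -> [1.0f0]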
+
+
+"""
+ fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max; num_bits=8, narrow_range=false)
+
+
+"""
+begin
+ begin
+ function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do
+ desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient")
+ begin
+ begin
+ gradients_ = convert(Tensor{Float32}, gradients_)
+ begin
+ end
+ end
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ min_ = convert(Tensor{Float32}, min_)
+ begin
+ end
+ end
+ begin
+ max_ = convert(Tensor{Float32}, max_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ min_ = convert(tf.EagerTensor, min_)
+ max_ = convert(tf.EagerTensor, max_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ if tf.in_eager_mode()
+ fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ else
+ fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ end
+ end
+ end
+end
+
+
+"""
+ unique_with_counts_v2(x, axis; out_idx=Int32)
+
+
+"""
+begin
+ begin
+ function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing)
+ local desc
+ tf.with_op_name(name, "UniqueWithCountsV2") do
+ desc = tf.NodeDescription("UniqueWithCountsV2")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Int64}, axis_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing)
+ desc = tf.EagerOp("UniqueWithCountsV2")
+ x_ = convert(tf.EagerTensor, x_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["Taxis"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing)
+ if tf.in_eager_mode()
+ unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx)
+ else
+ unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx)
+ end
+ end
+ end
+end
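+
+# Usage sketch (illustrative): returns three tensors -- the unique values, the
+# index of each input element in that output, and the count of each unique
+# value. The `axis` tensor is passed straight to the kernel (0-based):
+#
+#   y, idx, counts = unique_with_counts_v2(constant([1, 1, 2]), constant([0]))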
+
+
+"""
+ experimental_sleep_dataset(input_dataset, sleep_microseconds)
+
+
+"""
+begin
+ begin
+ function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalSleepDataset") do
+ desc = tf.NodeDescription("ExperimentalSleepDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ sleep_microseconds_ = convert(Tensor{Int64}, sleep_microseconds_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, sleep_microseconds_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalSleepDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ sleep_microseconds_ = convert(tf.EagerTensor, sleep_microseconds_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, sleep_microseconds_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ tpu_replicated_output(input)
+
+Operator that connects the output of an N-way replicated TPU computation to N separate outputs.
+"""
+begin
+ begin
+ function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing)
+ local desc
+ tf.with_op_name(name, "TPUReplicatedOutput") do
+ desc = tf.NodeDescription("TPUReplicatedOutput")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_replicas !== nothing
+ desc["num_replicas"] = Base.Int(num_replicas)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_replicas
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing)
+ desc = tf.EagerOp("TPUReplicatedOutput")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if num_replicas !== nothing
+ desc["num_replicas"] = Base.Int(num_replicas)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_output(input_; name=nothing, num_replicas=nothing)
+ if tf.in_eager_mode()
+ tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas)
+ else
+ tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas)
+ end
+ end
+ end
+end
+
+
+"""
+ lower_bound(sorted_inputs, values; out_type=Int32)
+
+
+"""
+begin
+ begin
+ function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "LowerBound") do
+ desc = tf.NodeDescription("LowerBound")
+ begin
+ begin
+ sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sorted_inputs_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("LowerBound")
+ sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, sorted_inputs_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(sorted_inputs_)
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type)
+ else
+ lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
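+
+# Usage sketch (illustrative): a batched analogue of `searchsortedfirst`; for
+# each row of `sorted_inputs`, returns the first (0-based) position at which
+# each value could be inserted without breaking the row's order:
+#
+#   sorted = constant([1.0 3.0 5.0])                       # 1x3, sorted row
+#   lower_bound(sorted, constant(reshape([4.0], 1, 1)))    # -> 2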
+
+
+"""
+ tan(x)
+
+
+"""
+begin
+ begin
+ function tan_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Tan") do
+ desc = tf.NodeDescription("Tan")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tan_eager(x_; name=nothing)
+ desc = tf.EagerOp("Tan")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tan, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tan(x_; name=nothing)
+ if tf.in_eager_mode()
+ tan_eager(x_; name=name)
+ else
+ tan_graph(x_; name=name)
+ end
+ end
+ end
+end
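+
+# Usage sketch (illustrative): elementwise tangent; in eager mode the result
+# is available immediately:
+#
+#   enable_eager_execution()
+#   tan(constant([0.0, pi / 4]))   # -> approximately [0.0, 1.0]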
+
+
+"""
+ enter(data; is_constant=false, parallel_iterations=10)
+
+
+"""
+begin
+ begin
+ function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+ local desc
+ tf.with_op_name(name, "Enter") do
+ desc = tf.NodeDescription("Enter")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if frame_name !== nothing
+ desc["frame_name"] = Base.String(frame_name)
+ end
+ end
+ begin
+ if is_constant !== nothing
+ desc["is_constant"] = Base.Bool(is_constant)
+ end
+ end
+ begin
+ if parallel_iterations !== nothing
+ desc["parallel_iterations"] = Base.Int(parallel_iterations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+ desc = tf.EagerOp("Enter")
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if frame_name !== nothing
+ desc["frame_name"] = Base.String(frame_name)
+ end
+ end
+ begin
+ if is_constant !== nothing
+ desc["is_constant"] = Base.Bool(is_constant)
+ end
+ end
+ begin
+ if parallel_iterations !== nothing
+ desc["parallel_iterations"] = Base.Int(parallel_iterations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+ if tf.in_eager_mode()
+ enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+ else
+ enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+ end
+ end
+ end
+end
+
+
+"""
+ infeed_enqueue_tuple(inputs; device_ordinal=-1)
+
+An op which feeds multiple Tensor values into the computation as an XLA tuple.
+"""
+begin
+ begin
+ function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "InfeedEnqueueTuple") do
+ desc = tf.NodeDescription("InfeedEnqueueTuple")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("InfeedEnqueueTuple")
+ inputs_ = [convert(tf.EagerTensor, x) for x = inputs_]
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+ else
+ infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
+
+"""
+ square(x)
+
+
+"""
+begin
+ begin
+ function square_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Square") do
+ desc = tf.NodeDescription("Square")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function square_eager(x_; name=nothing)
+ desc = tf.EagerOp("Square")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(square, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function square(x_; name=nothing)
+ if tf.in_eager_mode()
+ square_eager(x_; name=name)
+ else
+ square_graph(x_; name=name)
+ end
+ end
+ end
+end
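+
+# Usage sketch (illustrative): elementwise square via the "Square" kernel:
+#
+#   square(constant([1.0, 2.0, 3.0]))   # -> [1.0, 4.0, 9.0]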
+
+
+"""
+ _set_global_tpu_array(topology)
+
+An op that informs a host of the global ids of all the TPUs in the system.
+"""
+begin
+ begin
+ function _set_global_tpu_array_graph(topology_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_SetGlobalTPUArray") do
+ desc = tf.NodeDescription("_SetGlobalTPUArray")
+ begin
+ begin
+ topology_ = convert(Tensor{String}, topology_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, topology_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _set_global_tpu_array_eager(topology_; name=nothing)
+ desc = tf.EagerOp("_SetGlobalTPUArray")
+ topology_ = convert(tf.EagerTensor, topology_)
+ begin
+ begin
+ tf.add_input(desc, topology_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _set_global_tpu_array(topology_; name=nothing)
+ if tf.in_eager_mode()
+ _set_global_tpu_array_eager(topology_; name=name)
+ else
+ _set_global_tpu_array_graph(topology_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ debug_gradient_ref_identity(input)
+
+
+"""
+begin
+ begin
+ function debug_gradient_ref_identity_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "DebugGradientRefIdentity") do
+ desc = tf.NodeDescription("DebugGradientRefIdentity")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function debug_gradient_ref_identity_eager(input_; name=nothing)
+ desc = tf.EagerOp("DebugGradientRefIdentity")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_ref_identity(input_; name=nothing)
+ if tf.in_eager_mode()
+ debug_gradient_ref_identity_eager(input_; name=name)
+ else
+ debug_gradient_ref_identity_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyAdadelta") do
+ desc = tf.NodeDescription("ApplyAdadelta")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ accum_update_ = convert(Tensor{Any}, accum_update_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyAdadelta")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ accum_update_ = convert(tf.EagerTensor, accum_update_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, accum_update_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_update_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
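+
+# Usage sketch (illustrative): performs the Adadelta update in place,
+#
+#   accum        = rho * accum + (1 - rho) * grad^2
+#   update       = sqrt(accum_update + epsilon) / sqrt(accum + epsilon) * grad
+#   accum_update = rho * accum_update + (1 - rho) * update^2
+#   var         -= lr * update
+#
+# where `var`, `accum`, and `accum_update` are mutable state tensors:
+#
+#   apply_adadelta(var, accum, accum_update, lr, rho, eps, grad; use_locking=true)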
+
+
+"""
+ experimental_group_by_window_dataset(input_dataset, key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments)
+
+
+"""
+begin
+ begin
+ function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do
+ desc = tf.NodeDescription("ExperimentalGroupByWindowDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_]
+ begin
+ end
+ end
+ begin
+ reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_]
+ begin
+ end
+ end
+ begin
+ window_size_func_other_arguments_ = [convert(Tensor{Any}, x) for x = window_size_func_other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, key_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, reduce_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, window_size_func_other_arguments_)
+ end
+ end
+ begin
+ begin
+ if key_func !== nothing
+ desc["key_func"] = Base.identity(key_func)
+ end
+ end
+ begin
+ if reduce_func !== nothing
+ desc["reduce_func"] = Base.identity(reduce_func)
+ end
+ end
+ begin
+ if window_size_func !== nothing
+ desc["window_size_func"] = Base.identity(window_size_func)
+ end
+ end
+ begin
+ if Tkey_func_other_arguments !== nothing
+ desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments)
+ end
+ end
+ begin
+ if Treduce_func_other_arguments !== nothing
+ desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments)
+ end
+ end
+ begin
+ if Twindow_size_func_other_arguments !== nothing
+ desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalGroupByWindowDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ key_func_other_arguments_ = [convert(tf.EagerTensor, x) for x = key_func_other_arguments_]
+ reduce_func_other_arguments_ = [convert(tf.EagerTensor, x) for x = reduce_func_other_arguments_]
+ window_size_func_other_arguments_ = [convert(tf.EagerTensor, x) for x = window_size_func_other_arguments_]
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, key_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, reduce_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, window_size_func_other_arguments_)
+ end
+ end
+ begin
+ begin
+ if key_func !== nothing
+ desc["key_func"] = Base.identity(key_func)
+ end
+ end
+ begin
+ if reduce_func !== nothing
+ desc["reduce_func"] = Base.identity(reduce_func)
+ end
+ end
+ begin
+ if window_size_func !== nothing
+ desc["window_size_func"] = Base.identity(window_size_func)
+ end
+ end
+ begin
+ if Tkey_func_other_arguments !== nothing
+ desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments)
+ end
+ end
+ begin
+ if Treduce_func_other_arguments !== nothing
+ desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments)
+ end
+ end
+ begin
+ if Twindow_size_func_other_arguments !== nothing
+ desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ audio_summary(tag, tensor; max_outputs=3)
+
+
+"""
+begin
+ begin
+ function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
+ local desc
+ tf.with_op_name(name, "AudioSummary") do
+ desc = tf.NodeDescription("AudioSummary")
+ begin
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Float32}, tensor_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if sample_rate !== nothing
+ desc["sample_rate"] = Base.identity(sample_rate)
+ end
+ end
+ begin
+ if max_outputs !== nothing
+ desc["max_outputs"] = Base.Int(max_outputs)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
+ desc = tf.EagerOp("AudioSummary")
+ tag_ = convert(tf.EagerTensor, tag_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if sample_rate !== nothing
+ desc["sample_rate"] = Base.identity(sample_rate)
+ end
+ end
+ begin
+ if max_outputs !== nothing
+ desc["max_outputs"] = Base.Int(max_outputs)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(audio_summary, [tag_, tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
+ if tf.in_eager_mode()
+ audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs)
+ else
+ audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs)
+ end
+ end
+ end
+end
+
+
+"""
+ squared_difference(x, y)
+
+
+"""
+begin
+ begin
+ function squared_difference_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SquaredDifference") do
+ desc = tf.NodeDescription("SquaredDifference")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function squared_difference_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("SquaredDifference")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(squared_difference, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squared_difference(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ squared_difference_eager(x_, y_; name=name)
+ else
+ squared_difference_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
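+
+# Usage sketch (illustrative): computes (x - y)^2 elementwise, with the
+# kernel's usual broadcasting rules:
+#
+#   squared_difference(constant([1.0, 2.0]), constant([3.0, 0.0]))   # -> [4.0, 4.0]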
+
+
+"""
+ scatter_nd_update(ref, indices, updates; use_locking=true)
+
+
+"""
+begin
+ begin
+ function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterNdUpdate") do
+ desc = tf.NodeDescription("ScatterNdUpdate")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterNdUpdate")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
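+
+# A usage note (comments only): the graph wrapper above shifts `indices` down
+# by one, so callers pass ordinary 1-based Julia indices. A sketch, assuming
+# `ref` is a `Variable` over a length-5 vector and `sess` is a `Session`:
+#
+#   idx = constant(reshape(Int32[2, 4], 2, 1))   # N×1 matrix of element indices
+#   run(sess, scatter_nd_update(ref, idx, constant([10.0, 20.0])))
+#   # ref now holds [0, 10, 0, 20, 0]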
+
+
+"""
+ dynamic_stitch(indices, data)
+
+
+"""
+begin
+ begin
+ function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "DynamicStitch") do
+ desc = tf.NodeDescription("DynamicStitch")
+ begin
+ begin
+ indices_ = [convert(Tensor{Int32}, x) for x = indices_]
+ begin
+ end
+ end
+ begin
+ data_ = [convert(Tensor{Any}, x) for x = data_]
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing)
+ desc = tf.EagerOp("DynamicStitch")
+ indices_ = convert(tf.EagerTensor, indices_)
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ dynamic_stitch_eager(indices_, data_; name=name, N=N)
+ else
+ dynamic_stitch_graph(indices_, data_; name=name, N=N)
+ end
+ end
+ end
+end
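+
+# A minimal usage sketch (comments only). `DynamicStitch` interleaves a list of
+# data tensors into one tensor according to parallel index lists; note that this
+# wrapper applies no 1-based index shift, so the indices follow TensorFlow's
+# 0-based convention:
+#
+#   indices = [constant(Int32[0]), constant(Int32[1, 2])]
+#   data    = [constant([10.0]), constant([20.0, 30.0])]
+#   dynamic_stitch(indices, data)   # -> [10.0, 20.0, 30.0]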
+
+
+"""
+ ones_like(x)
+
+
+"""
+begin
+ begin
+ function ones_like_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "OnesLike") do
+ desc = tf.NodeDescription("OnesLike")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ones_like_eager(x_; name=nothing)
+ desc = tf.EagerOp("OnesLike")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ones_like, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ones_like(x_; name=nothing)
+ if tf.in_eager_mode()
+ ones_like_eager(x_; name=name)
+ else
+ ones_like_graph(x_; name=name)
+ end
+ end
+ end
+end
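+
+# A minimal usage sketch (comments only): `ones_like` returns a tensor of ones
+# with the same shape and element type as its argument:
+#
+#   x = constant(randn(2, 3))
+#   ones_like(x)   # -> 2×3 tensor of Float64 ones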
+
+
+"""
+ fractional_max_pool_grad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence; overlapping=false)
+
+
+"""
+begin
+ begin
+ function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+ local desc
+ tf.with_op_name(name, "FractionalMaxPoolGrad") do
+ desc = tf.NodeDescription("FractionalMaxPoolGrad")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Any}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Any}, orig_output_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_)
+ begin
+ end
+ end
+ begin
+ col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_)
+ begin
+ end
+ end
+ begin
+ (orig_input_, orig_output_, out_backprop_) = tf.tf_promote(orig_input_, orig_output_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ begin
+ tf.add_input(desc, row_pooling_sequence_)
+ end
+ begin
+ tf.add_input(desc, col_pooling_sequence_)
+ end
+ end
+ begin
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+ desc = tf.EagerOp("FractionalMaxPoolGrad")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_)
+ col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ begin
+ tf.add_input(desc, row_pooling_sequence_)
+ end
+ begin
+ tf.add_input(desc, col_pooling_sequence_)
+ end
+ end
+ begin
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+ if tf.in_eager_mode()
+ fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+ else
+ fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+ end
+ end
+ end
+end
+
+
+"""
+ remote_call(target, args)
+
+
+"""
+begin
+ begin
+ function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ local desc
+ tf.with_op_name(name, "RemoteCall") do
+ desc = tf.NodeDescription("RemoteCall")
+ begin
+ begin
+ target_ = convert(Tensor{String}, target_)
+ begin
+ end
+ end
+ begin
+ args_ = [convert(Tensor{Any}, x) for x = args_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, target_)
+ end
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ desc = tf.EagerOp("RemoteCall")
+ target_ = convert(tf.EagerTensor, target_)
+ args_ = convert(tf.EagerTensor, args_)
+ begin
+ begin
+ tf.add_input(desc, target_)
+ end
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ if tf.in_eager_mode()
+ remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f)
+ else
+ remote_call_graph(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f)
+ end
+ end
+ end
+end
+
+
+"""
+ gather(params, indices; validate_indices=true)
+
+
+"""
+begin
+ begin
+ function gather_graph(params_, indices_; name=nothing, validate_indices=nothing)
+ local desc
+ tf.with_op_name(name, "Gather") do
+ desc = tf.NodeDescription("Gather")
+ begin
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (params_,) = tf.tf_promote(params_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function gather_eager(params_, indices_; name=nothing, validate_indices=nothing)
+ desc = tf.EagerOp("Gather")
+ params_ = convert(tf.EagerTensor, params_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ end
+ begin
+ desc["Tparams"] = tf.data_type(params_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing)
+ if tf.in_eager_mode()
+ gather_eager(params_, indices_; name=name, validate_indices=validate_indices)
+ else
+ gather_graph(params_, indices_; name=name, validate_indices=validate_indices)
+ end
+ end
+ end
+end
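+
+# A usage note (comments only): as with `scatter_nd_update`, the graph wrapper
+# subtracts 1 from `indices`, so 1-based Julia indices select slices along the
+# first axis of `params`:
+#
+#   params = constant([1.0 2.0; 3.0 4.0; 5.0 6.0])
+#   gather(params, constant([1, 3]))   # slices 1 and 3 along the first axis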
+
+
+"""
+ quantized_mat_mul(a, b, min_a, max_a, min_b, max_b; transpose_a=false, transpose_b=false)
+
+
+"""
+begin
+ begin
+ function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedMatMul") do
+ desc = tf.NodeDescription("QuantizedMatMul")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ b_ = convert(Tensor{Any}, b_)
+ begin
+ end
+ end
+ begin
+ min_a_ = convert(Tensor{Float32}, min_a_)
+ begin
+ end
+ end
+ begin
+ max_a_ = convert(Tensor{Float32}, max_a_)
+ begin
+ end
+ end
+ begin
+ min_b_ = convert(Tensor{Float32}, min_b_)
+ begin
+ end
+ end
+ begin
+ max_b_ = convert(Tensor{Float32}, max_b_)
+ begin
+ end
+ end
+ begin
+ (a_,) = tf.tf_promote(a_)
+ end
+ begin
+ (b_,) = tf.tf_promote(b_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ begin
+ tf.add_input(desc, min_a_)
+ end
+ begin
+ tf.add_input(desc, max_a_)
+ end
+ begin
+ tf.add_input(desc, min_b_)
+ end
+ begin
+ tf.add_input(desc, max_b_)
+ end
+ end
+ begin
+ begin
+ if transpose_a !== nothing
+ desc["transpose_a"] = Base.Bool(transpose_a)
+ end
+ end
+ begin
+ if transpose_b !== nothing
+ desc["transpose_b"] = Base.Bool(transpose_b)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+ desc = tf.EagerOp("QuantizedMatMul")
+ a_ = convert(tf.EagerTensor, a_)
+ b_ = convert(tf.EagerTensor, b_)
+ min_a_ = convert(tf.EagerTensor, min_a_)
+ max_a_ = convert(tf.EagerTensor, max_a_)
+ min_b_ = convert(tf.EagerTensor, min_b_)
+ max_b_ = convert(tf.EagerTensor, max_b_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ begin
+ tf.add_input(desc, min_a_)
+ end
+ begin
+ tf.add_input(desc, max_a_)
+ end
+ begin
+ tf.add_input(desc, min_b_)
+ end
+ begin
+ tf.add_input(desc, max_b_)
+ end
+ end
+ begin
+ begin
+ if transpose_a !== nothing
+ desc["transpose_a"] = Base.Bool(transpose_a)
+ end
+ end
+ begin
+ if transpose_b !== nothing
+ desc["transpose_b"] = Base.Bool(transpose_b)
+ end
+ end
+ end
+ begin
+ desc["T1"] = tf.data_type(a_)
+ end
+ begin
+ desc["T2"] = tf.data_type(b_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+ if tf.in_eager_mode()
+ quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+ else
+ quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+ end
+ end
+ end
+end
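+
+# A usage note (comments only): unlike most wrappers in this file, the
+# quantized ops return all of the op's outputs. A sketch of the calling
+# convention, where `(min_out, max_out)` give the float range represented by
+# the quantized values in `out`:
+#
+#   out, min_out, max_out = quantized_mat_mul(a, b, min_a, max_a, min_b, max_b)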
+
+
+"""
+    unicode_decode_with_offsets(input; errors="replace", replacement_char=65533, replace_control_characters=false)
+
+
+"""
+begin
+ begin
+ function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ local desc
+ tf.with_op_name(name, "UnicodeDecodeWithOffsets") do
+ desc = tf.NodeDescription("UnicodeDecodeWithOffsets")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if input_encoding !== nothing
+ desc["input_encoding"] = Base.String(input_encoding)
+ end
+ end
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ begin
+ if replace_control_characters !== nothing
+ desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unicode_decode_with_offsets_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ desc = tf.EagerOp("UnicodeDecodeWithOffsets")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if input_encoding !== nothing
+ desc["input_encoding"] = Base.String(input_encoding)
+ end
+ end
+ begin
+ if errors !== nothing
+ desc["errors"] = Base.String(errors)
+ end
+ end
+ begin
+ if replacement_char !== nothing
+ desc["replacement_char"] = Base.Int(replacement_char)
+ end
+ end
+ begin
+ if replace_control_characters !== nothing
+ desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+ if tf.in_eager_mode()
+ unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ else
+ unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+ end
+ end
+ end
+end
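+
+# A minimal usage sketch (comments only). The op returns three tensors in the
+# ragged-tensor encoding: row splits, decoded code points, and the byte offset
+# of each character within its source string:
+#
+#   row_splits, char_values, char_offsets =
+#       unicode_decode_with_offsets(constant(["héllo"]); input_encoding="UTF-8")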
+
+
+"""
+ accumulator_apply_gradient(handle, local_step, gradient)
+
+
+"""
+begin
+ begin
+ function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "AccumulatorApplyGradient") do
+ desc = tf.NodeDescription("AccumulatorApplyGradient")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ local_step_ = convert(Tensor{Int64}, local_step_)
+ begin
+ end
+ end
+ begin
+ gradient_ = convert(Tensor{Any}, gradient_)
+ begin
+ end
+ end
+ begin
+ (gradient_,) = tf.tf_promote(gradient_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, local_step_)
+ end
+ begin
+ tf.add_input(desc, gradient_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("AccumulatorApplyGradient")
+ handle_ = convert(tf.EagerTensor, handle_)
+ local_step_ = convert(tf.EagerTensor, local_step_)
+ gradient_ = convert(tf.EagerTensor, gradient_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, local_step_)
+ end
+ begin
+ tf.add_input(desc, gradient_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(gradient_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype)
+ else
+ accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+    enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=String[])
+
+This Op eases the porting of code that uses tf.nn.embedding_lookup_sparse().
+"""
+begin
+ begin
+ function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
+ local desc
+ tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do
+ desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch")
+ begin
+ begin
+ sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_]
+ begin
+ end
+ end
+ begin
+ embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_]
+ begin
+ end
+ end
+ begin
+ aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_]
+ begin
+ end
+ end
+ begin
+ mode_override_ = convert(Tensor{String}, mode_override_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sample_indices_)
+ end
+ begin
+ tf.add_input(desc, embedding_indices_)
+ end
+ begin
+ tf.add_input(desc, aggregation_weights_)
+ end
+ begin
+ tf.add_input(desc, mode_override_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ begin
+ if combiners !== nothing
+ desc["combiners"] = map(Base.identity, combiners)
+ end
+ end
+ begin
+ if table_ids !== nothing
+ desc["table_ids"] = map(Base.identity, table_ids)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
+ desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch")
+ sample_indices_ = convert(tf.EagerTensor, sample_indices_)
+ embedding_indices_ = convert(tf.EagerTensor, embedding_indices_)
+ aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_)
+ mode_override_ = convert(tf.EagerTensor, mode_override_)
+ begin
+ begin
+ tf.add_input(desc, sample_indices_)
+ end
+ begin
+ tf.add_input(desc, embedding_indices_)
+ end
+ begin
+ tf.add_input(desc, aggregation_weights_)
+ end
+ begin
+ tf.add_input(desc, mode_override_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ begin
+ if combiners !== nothing
+ desc["combiners"] = map(Base.identity, combiners)
+ end
+ end
+ begin
+ if table_ids !== nothing
+ desc["table_ids"] = map(Base.identity, table_ids)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
+ if tf.in_eager_mode()
+ enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids)
+ else
+ enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids)
+ end
+ end
+ end
+end
+
+
+"""
+ write_summary(writer, step, tensor, tag, summary_metadata)
+
+
+"""
+begin
+ begin
+ function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WriteSummary") do
+ desc = tf.NodeDescription("WriteSummary")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ summary_metadata_ = convert(Tensor{String}, summary_metadata_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, summary_metadata_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
+ desc = tf.EagerOp("WriteSummary")
+ writer_ = convert(tf.EagerTensor, writer_)
+ step_ = convert(tf.EagerTensor, step_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ summary_metadata_ = convert(tf.EagerTensor, summary_metadata_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, summary_metadata_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
+ if tf.in_eager_mode()
+ write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name)
+ else
+ write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1])
+
+
+"""
+begin
+ begin
+ function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedConv2D") do
+ desc = tf.NodeDescription("QuantizedConv2D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ min_input_ = convert(Tensor{Float32}, min_input_)
+ begin
+ end
+ end
+ begin
+ max_input_ = convert(Tensor{Float32}, max_input_)
+ begin
+ end
+ end
+ begin
+ min_filter_ = convert(Tensor{Float32}, min_filter_)
+ begin
+ end
+ end
+ begin
+ max_filter_ = convert(Tensor{Float32}, max_filter_)
+ begin
+ end
+ end
+ begin
+ (filter_,) = tf.tf_promote(filter_)
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ begin
+ tf.add_input(desc, min_filter_)
+ end
+ begin
+ tf.add_input(desc, max_filter_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ desc = tf.EagerOp("QuantizedConv2D")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ min_input_ = convert(tf.EagerTensor, min_input_)
+ max_input_ = convert(tf.EagerTensor, max_input_)
+ min_filter_ = convert(tf.EagerTensor, min_filter_)
+ max_filter_ = convert(tf.EagerTensor, max_filter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ begin
+ tf.add_input(desc, min_filter_)
+ end
+ begin
+ tf.add_input(desc, max_filter_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tfilter"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+ else
+ quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_momentum(var, accum, lr, grad, momentum; use_locking=false, use_nesterov=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyMomentum") do
+ desc = tf.NodeDescription("ResourceApplyMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ResourceApplyMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
+ end
+end
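+
+# The update rule applied by the kernel (a sketch of the TensorFlow semantics):
+#
+#   accum <- momentum * accum + grad
+#   var   <- var - lr * accum                          (use_nesterov = false)
+#   var   <- var - lr * grad - lr * momentum * accum   (use_nesterov = true)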
+
+
+"""
+ log1p(x)
+
+
+"""
+begin
+ begin
+ function log1p_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Log1p") do
+ desc = tf.NodeDescription("Log1p")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function log1p_eager(x_; name=nothing)
+ desc = tf.EagerOp("Log1p")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(log1p, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log1p(x_; name=nothing)
+ if tf.in_eager_mode()
+ log1p_eager(x_; name=name)
+ else
+ log1p_graph(x_; name=name)
+ end
+ end
+ end
+end
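+
+# A minimal usage sketch (comments only): `log1p` evaluates log(1 + x) in a way
+# that stays accurate when `x` is tiny, where the naive form rounds 1 + x to 1:
+#
+#   log1p(constant(1e-20))   # ≈ 1e-20, while log(1 + 1e-20) evaluates to 0.0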
+
+
+"""
+    ordered_map_clear(; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapClear") do
+ desc = tf.NodeDescription("OrderedMapClear")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ordered_map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapClear")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_scatter_update(resource, indices, updates)
+
+
+"""
+begin
+ begin
+ function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterUpdate") do
+ desc = tf.NodeDescription("ResourceScatterUpdate")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterUpdate")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
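+
+# A usage note (comments only): like `gather`, this wrapper shifts `indices`
+# down by one, so 1-based Julia indices address rows of the resource variable.
+# A sketch, assuming `handle` is a resource-variable handle (how it is obtained
+# depends on the variable API in use):
+#
+#   resource_scatter_update(handle, constant([1, 3]), constant([10.0, 30.0]))
+#   # overwrites rows 1 and 3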
+
+
+"""
+ barrier_take_many(handle, num_elements; allow_small_batch=false, wait_for_incomplete=false, timeout_ms=-1)
+
+
+"""
+begin
+ begin
+ function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "BarrierTakeMany") do
+ desc = tf.NodeDescription("BarrierTakeMany")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ num_elements_ = convert(Tensor{Int32}, num_elements_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, num_elements_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if allow_small_batch !== nothing
+ desc["allow_small_batch"] = Base.Bool(allow_small_batch)
+ end
+ end
+ begin
+ if wait_for_incomplete !== nothing
+ desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("BarrierTakeMany")
+ handle_ = convert(tf.EagerTensor, handle_)
+ num_elements_ = convert(tf.EagerTensor, num_elements_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, num_elements_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if allow_small_batch !== nothing
+ desc["allow_small_batch"] = Base.Bool(allow_small_batch)
+ end
+ end
+ begin
+ if wait_for_incomplete !== nothing
+ desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms)
+ else
+ barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_keras_momentum(var, accum, lr, grad, momentum; use_locking=false, use_nesterov=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyKerasMomentum") do
+ desc = tf.NodeDescription("ResourceApplyKerasMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ResourceApplyKerasMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
+ end
+end
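+
+# The Keras flavor differs from `resource_apply_momentum` in where the learning
+# rate enters (a sketch of the TensorFlow semantics):
+#
+#   accum <- momentum * accum - lr * grad
+#   var   <- var + accum                          (use_nesterov = false)
+#   var   <- var + momentum * accum - lr * grad   (use_nesterov = true)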
+
+
+"""
+    generate_big_query_reader_partitions(; test_end_point="")
+
+Generates serialized partition messages suitable for batch reads.
+"""
+begin
+ begin
+ function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing)
+ local desc
+ tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do
+ desc = tf.NodeDescription("GenerateBigQueryReaderPartitions")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if project_id !== nothing
+ desc["project_id"] = Base.String(project_id)
+ end
+ end
+ begin
+ if dataset_id !== nothing
+ desc["dataset_id"] = Base.String(dataset_id)
+ end
+ end
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.String(table_id)
+ end
+ end
+ begin
+ if columns !== nothing
+ desc["columns"] = map(Base.identity, columns)
+ end
+ end
+ begin
+ if timestamp_millis !== nothing
+ desc["timestamp_millis"] = Base.Int(timestamp_millis)
+ end
+ end
+ begin
+ if num_partitions !== nothing
+ desc["num_partitions"] = Base.Int(num_partitions)
+ end
+ end
+ begin
+ if test_end_point !== nothing
+ desc["test_end_point"] = Base.String(test_end_point)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function generate_big_query_reader_partitions_eager(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing)
+ desc = tf.EagerOp("GenerateBigQueryReaderPartitions")
+ begin
+ end
+ begin
+ begin
+ if project_id !== nothing
+ desc["project_id"] = Base.String(project_id)
+ end
+ end
+ begin
+ if dataset_id !== nothing
+ desc["dataset_id"] = Base.String(dataset_id)
+ end
+ end
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.String(table_id)
+ end
+ end
+ begin
+ if columns !== nothing
+ desc["columns"] = map(Base.identity, columns)
+ end
+ end
+ begin
+ if timestamp_millis !== nothing
+ desc["timestamp_millis"] = Base.Int(timestamp_millis)
+ end
+ end
+ begin
+ if num_partitions !== nothing
+ desc["num_partitions"] = Base.Int(num_partitions)
+ end
+ end
+ begin
+ if test_end_point !== nothing
+ desc["test_end_point"] = Base.String(test_end_point)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing)
+ if tf.in_eager_mode()
+ generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point)
+ else
+ generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point)
+ end
+ end
+ end
+end
+
+
+"""
+ _xla_recv_at_host(dynamic_key)
+
+A placeholder op for multiple values that will be sent to TensorFlow from a
+running XLA computation.
+"""
+begin
+ begin
+ function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "_XlaRecvAtHost") do
+ desc = tf.NodeDescription("_XlaRecvAtHost")
+ begin
+ begin
+ dynamic_key_ = convert(Tensor{String}, dynamic_key_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, dynamic_key_)
+ end
+ end
+ begin
+ begin
+ if Toutputs !== nothing
+ desc["Toutputs"] = map(Base.identity, Toutputs)
+ end
+ end
+ begin
+ if key !== nothing
+ desc["key"] = Base.String(key)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("_XlaRecvAtHost")
+ dynamic_key_ = convert(tf.EagerTensor, dynamic_key_)
+ begin
+ begin
+ tf.add_input(desc, dynamic_key_)
+ end
+ end
+ begin
+ begin
+ if Toutputs !== nothing
+ desc["Toutputs"] = map(Base.identity, Toutputs)
+ end
+ end
+ begin
+ if key !== nothing
+ desc["key"] = Base.String(key)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal)
+ else
+ _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_avg_pool(input, min_input, max_input)
+
+
+"""
+begin
+ begin
+ function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedAvgPool") do
+ desc = tf.NodeDescription("QuantizedAvgPool")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ min_input_ = convert(Tensor{Float32}, min_input_)
+ begin
+ end
+ end
+ begin
+ max_input_ = convert(Tensor{Float32}, max_input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("QuantizedAvgPool")
+ input_ = convert(tf.EagerTensor, input_)
+ min_input_ = convert(tf.EagerTensor, min_input_)
+ max_input_ = convert(tf.EagerTensor, max_input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding)
+ else
+ quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_adam_with_amsgrad(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do
+ desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ vhat_ = convert(Tensor{Any}, vhat_)
+ begin
+ end
+ end
+ begin
+ beta1_power_ = convert(Tensor{Any}, beta1_power_)
+ begin
+ end
+ end
+ begin
+ beta2_power_ = convert(Tensor{Any}, beta2_power_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ beta1_ = convert(Tensor{Any}, beta1_)
+ begin
+ end
+ end
+ begin
+ beta2_ = convert(Tensor{Any}, beta2_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, vhat_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, beta2_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ vhat_ = convert(tf.EagerTensor, vhat_)
+ beta1_power_ = convert(tf.EagerTensor, beta1_power_)
+ beta2_power_ = convert(tf.EagerTensor, beta2_power_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ beta1_ = convert(tf.EagerTensor, beta1_)
+ beta2_ = convert(tf.EagerTensor, beta2_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, vhat_)
+ end
+ begin
+ tf.add_input(desc, beta1_power_)
+ end
+ begin
+ tf.add_input(desc, beta2_power_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, beta1_)
+ end
+ begin
+ tf.add_input(desc, beta2_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_power_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta1_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta2_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ _host_recv(; client_terminated=false)
+
+Receives the named tensor from send_device on recv_device.
+"""
+begin
+ begin
+ function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ local desc
+ tf.with_op_name(name, "_HostRecv") do
+ desc = tf.NodeDescription("_HostRecv")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if tensor_type !== nothing
+ desc["tensor_type"] = Base.identity(tensor_type)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _host_recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ desc = tf.EagerOp("_HostRecv")
+ begin
+ end
+ begin
+ begin
+ if tensor_type !== nothing
+ desc["tensor_type"] = Base.identity(tensor_type)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if send_device !== nothing
+ desc["send_device"] = Base.String(send_device)
+ end
+ end
+ begin
+ if send_device_incarnation !== nothing
+ desc["send_device_incarnation"] = Base.Int(send_device_incarnation)
+ end
+ end
+ begin
+ if recv_device !== nothing
+ desc["recv_device"] = Base.String(recv_device)
+ end
+ end
+ begin
+ if client_terminated !== nothing
+ desc["client_terminated"] = Base.Bool(client_terminated)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+ if tf.in_eager_mode()
+ _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ else
+ _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_center_bias(tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2)
+
+Calculates the prior (bias) from the training data and fills in the first node with the logits' prior.
+"""
+begin
+ begin
+ function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesCenterBias") do
+ desc = tf.NodeDescription("BoostedTreesCenterBias")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ mean_gradients_ = convert(Tensor{Float32}, mean_gradients_)
+ begin
+ end
+ end
+ begin
+ mean_hessians_ = convert(Tensor{Float32}, mean_hessians_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Float32}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Float32}, l2_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, mean_gradients_)
+ end
+ begin
+ tf.add_input(desc, mean_hessians_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing)
+ desc = tf.EagerOp("BoostedTreesCenterBias")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ mean_gradients_ = convert(tf.EagerTensor, mean_gradients_)
+ mean_hessians_ = convert(tf.EagerTensor, mean_hessians_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, mean_gradients_)
+ end
+ begin
+ tf.add_input(desc, mean_hessians_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name)
+ else
+ boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ lookup_table_size_v2(table_handle)
+
+Computes the number of elements in the given table.
+"""
+begin
+ begin
+ function lookup_table_size_v2_graph(table_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableSizeV2") do
+ desc = tf.NodeDescription("LookupTableSizeV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_size_v2_eager(table_handle_; name=nothing)
+ desc = tf.EagerOp("LookupTableSizeV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size_v2(table_handle_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_size_v2_eager(table_handle_; name=name)
+ else
+ lookup_table_size_v2_graph(table_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ irfft(input, fft_length)
+
+Computes the inverse 1-dimensional discrete Fourier transform of a real-valued signal.
+"""
+begin
+ begin
+ function irfft_graph(input_, fft_length_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IRFFT") do
+ desc = tf.NodeDescription("IRFFT")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ fft_length_ = convert(Tensor{Int32}, fft_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function irfft_eager(input_, fft_length_; name=nothing)
+ desc = tf.EagerOp("IRFFT")
+ input_ = convert(tf.EagerTensor, input_)
+ fft_length_ = convert(tf.EagerTensor, fft_length_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft(input_, fft_length_; name=nothing)
+ if tf.in_eager_mode()
+ irfft_eager(input_, fft_length_; name=name)
+ else
+ irfft_graph(input_, fft_length_; name=name)
+ end
+ end
+ end
+end
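+
+# Usage sketch (hypothetical values; assumes graph mode and a live Session). For an
+# output of length n, input holds the n ÷ 2 + 1 nonnegative-frequency terms:
+#   sess = Session(Graph())
+#   spectrum = constant(ComplexF32[6.0 + 0im, -2.0 + 2im, -2.0 + 0im])
+#   run(sess, irfft(spectrum, constant(Int32[4])))  # ≈ Float32[0, 1, 2, 3]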
+
+
+"""
+ inplace_add(x, i, v)
+
+Adds v into the rows of x selected by i.
+"""
+begin
+ begin
+ function inplace_add_graph(x_, i_, v_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InplaceAdd") do
+ desc = tf.NodeDescription("InplaceAdd")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ i_ = convert(Tensor{Int32}, i_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ (x_, v_) = tf.tf_promote(x_, v_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, i_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function inplace_add_eager(x_, i_, v_; name=nothing)
+ desc = tf.EagerOp("InplaceAdd")
+ x_ = convert(tf.EagerTensor, x_)
+ i_ = convert(tf.EagerTensor, i_)
+ v_ = convert(tf.EagerTensor, v_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, i_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_add(x_, i_, v_; name=nothing)
+ if tf.in_eager_mode()
+ inplace_add_eager(x_, i_, v_; name=name)
+ else
+ inplace_add_graph(x_, i_, v_; name=name)
+ end
+ end
+ end
+end
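+
+# Usage sketch (hypothetical values; assumes graph mode). Note that this wrapper
+# passes i through unshifted, so row indices follow the op's 0-based convention:
+#   sess = Session(Graph())
+#   y = inplace_add(constant(zeros(Float32, 3, 2)), constant(Int32[0]), constant(ones(Float32, 1, 2)))
+#   run(sess, y)  # first row of the result is [1, 1]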
+
+
+"""
+ bias_add(value, bias; data_format=)
+
+Adds bias to value.
+"""
+begin
+ begin
+ function bias_add_graph(value_, bias_; name=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "BiasAdd") do
+ desc = tf.NodeDescription("BiasAdd")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ bias_ = convert(Tensor{Any}, bias_)
+ begin
+ end
+ end
+ begin
+ (value_, bias_) = tf.tf_promote(value_, bias_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, bias_)
+ end
+ end
+ begin
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bias_add_eager(value_, bias_; name=nothing, data_format=nothing)
+ desc = tf.EagerOp("BiasAdd")
+ value_ = convert(tf.EagerTensor, value_)
+ bias_ = convert(tf.EagerTensor, bias_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, bias_)
+ end
+ end
+ begin
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ begin
+ desc["T"] = tf.data_type(bias_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add(value_, bias_; name=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ bias_add_eager(value_, bias_; name=name, data_format=data_format)
+ else
+ bias_add_graph(value_, bias_; name=name, data_format=data_format)
+ end
+ end
+ end
+end
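+
+# Usage sketch (hypothetical values; assumes graph mode):
+#   sess = Session(Graph())
+#   y = bias_add(constant(ones(Float32, 2, 3)), constant(Float32[10, 20, 30]))
+#   run(sess, y)  # each row is [11, 21, 31]; the bias spans the last dimension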
+
+
+"""
+ _disconnect_host_from_distributed_tpu_system()
+
+An op that disconnects the TPUs on a host from a running distributed TPU system.
+"""
+begin
+ begin
+ function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing)
+ local desc
+ tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do
+ desc = tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem")
+ begin
+ end
+ begin
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing)
+ desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem")
+ begin
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _disconnect_host_from_distributed_tpu_system(; name=nothing)
+ if tf.in_eager_mode()
+ _disconnect_host_from_distributed_tpu_system_eager(; name=name)
+ else
+ _disconnect_host_from_distributed_tpu_system_graph(; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_adam_parameters_grad_accum_debug(parameters, momenta, velocities, gradient_accumulators; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ momenta_ = convert(Tensor{Float32}, momenta_)
+ begin
+ end
+ end
+ begin
+ velocities_ = convert(Tensor{Float32}, velocities_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ begin
+ tf.add_input(desc, velocities_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ momenta_ = convert(tf.EagerTensor, momenta_)
+ velocities_ = convert(tf.EagerTensor, velocities_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, momenta_)
+ end
+ begin
+ tf.add_input(desc, velocities_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ ragged_range(starts, limits, deltas)
+
+Returns a RaggedTensor containing the specified sequences of numbers.
+"""
+begin
+ begin
+ function ragged_range_graph(starts_, limits_, deltas_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RaggedRange") do
+ desc = tf.NodeDescription("RaggedRange")
+ begin
+ begin
+ starts_ = convert(Tensor{Int32}, starts_)
+ begin
+ end
+ end
+ begin
+ limits_ = convert(Tensor{Int32}, limits_)
+ begin
+ end
+ end
+ begin
+ deltas_ = convert(Tensor{Int32}, deltas_)
+ begin
+ end
+ end
+ begin
+ (starts_, limits_, deltas_) = tf.tf_promote(starts_, limits_, deltas_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, starts_)
+ end
+ begin
+ tf.add_input(desc, limits_)
+ end
+ begin
+ tf.add_input(desc, deltas_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ragged_range_eager(starts_, limits_, deltas_; name=nothing)
+ desc = tf.EagerOp("RaggedRange")
+ starts_ = convert(tf.EagerTensor, starts_)
+ limits_ = convert(tf.EagerTensor, limits_)
+ deltas_ = convert(tf.EagerTensor, deltas_)
+ begin
+ begin
+ tf.add_input(desc, starts_)
+ end
+ begin
+ tf.add_input(desc, limits_)
+ end
+ begin
+ tf.add_input(desc, deltas_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(starts_)
+ end
+ begin
+ desc["T"] = tf.data_type(limits_)
+ end
+ begin
+ desc["T"] = tf.data_type(deltas_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_range(starts_, limits_, deltas_; name=nothing)
+ if tf.in_eager_mode()
+ ragged_range_eager(starts_, limits_, deltas_; name=name)
+ else
+ ragged_range_graph(starts_, limits_, deltas_; name=name)
+ end
+ end
+ end
+end
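+
+# Usage sketch (hypothetical values; assumes graph mode). The op returns two
+# tensors: the row splits and the flat values of the resulting ragged tensor:
+#   sess = Session(Graph())
+#   splits, values = ragged_range(constant(Int32[0, 5]), constant(Int32[3, 8]), constant(Int32[1, 1]))
+#   run(sess, [splits, values])  # rows are 0:2 and 5:7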
+
+
+"""
+ window_dataset(input_dataset, size, shift, stride, drop_remainder)
+
+Combines elements of input_dataset into a dataset of windows.
+"""
+begin
+ begin
+ function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "WindowDataset") do
+ desc = tf.NodeDescription("WindowDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int64}, size_)
+ begin
+ end
+ end
+ begin
+ shift_ = convert(Tensor{Int64}, shift_)
+ begin
+ end
+ end
+ begin
+ stride_ = convert(Tensor{Int64}, stride_)
+ begin
+ end
+ end
+ begin
+ drop_remainder_ = convert(Tensor{Bool}, drop_remainder_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, shift_)
+ end
+ begin
+ tf.add_input(desc, stride_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("WindowDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ size_ = convert(tf.EagerTensor, size_)
+ shift_ = convert(tf.EagerTensor, shift_)
+ stride_ = convert(tf.EagerTensor, stride_)
+ drop_remainder_ = convert(tf.EagerTensor, drop_remainder_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, shift_)
+ end
+ begin
+ tf.add_input(desc, stride_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ diag(diagonal)
+
+Returns a diagonal tensor with the given diagonal values.
+"""
+begin
+ begin
+ function diag_graph(diagonal_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Diag") do
+ desc = tf.NodeDescription("Diag")
+ begin
+ begin
+ diagonal_ = convert(Tensor{Any}, diagonal_)
+ begin
+ end
+ end
+ begin
+ (diagonal_,) = tf.tf_promote(diagonal_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function diag_eager(diagonal_; name=nothing)
+ desc = tf.EagerOp("Diag")
+ diagonal_ = convert(tf.EagerTensor, diagonal_)
+ begin
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(diagonal_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(diag, [diagonal_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag(diagonal_; name=nothing)
+ if tf.in_eager_mode()
+ diag_eager(diagonal_; name=name)
+ else
+ diag_graph(diagonal_; name=name)
+ end
+ end
+ end
+end
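+
+# Usage sketch (hypothetical values; assumes graph mode):
+#   sess = Session(Graph())
+#   run(sess, diag(constant(Float32[1, 2, 3])))  # 3×3 matrix with [1, 2, 3] on the diagonal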
+
+
+"""
+ infeed_dequeue()
+
+A placeholder op for a value that will be fed into the computation.
+"""
+begin
+ begin
+ function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing)
+ local desc
+ tf.with_op_name(name, "InfeedDequeue") do
+ desc = tf.NodeDescription("InfeedDequeue")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function infeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing)
+ desc = tf.EagerOp("InfeedDequeue")
+ begin
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing)
+ if tf.in_eager_mode()
+ infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape)
+ else
+ infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_latency_stats_dataset(input_dataset, tag)
+
+Records the latency of producing input_dataset elements in a StatsAggregator.
+"""
+begin
+ begin
+ function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do
+ desc = tf.NodeDescription("ExperimentalLatencyStatsDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalLatencyStatsDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ add_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape; container=, shared_name=)
+
+Adds a SparseTensor to a SparseTensorsMap and returns its handle.
+"""
+begin
+ begin
+ function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "AddSparseToTensorsMap") do
+ desc = tf.NodeDescription("AddSparseToTensorsMap")
+ begin
+ begin
+ sparse_indices_ = convert(Tensor{Int64}, sparse_indices_)
+ begin
+ end
+ end
+ begin
+ sparse_values_ = convert(Tensor{Any}, sparse_values_)
+ begin
+ end
+ end
+ begin
+ sparse_shape_ = convert(Tensor{Int64}, sparse_shape_)
+ begin
+ end
+ end
+ begin
+ (sparse_values_,) = tf.tf_promote(sparse_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("AddSparseToTensorsMap")
+ sparse_indices_ = convert(tf.EagerTensor, sparse_indices_)
+ sparse_values_ = convert(tf.EagerTensor, sparse_values_)
+ sparse_shape_ = convert(tf.EagerTensor, sparse_shape_)
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(sparse_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name)
+ else
+ add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ ragged_gather(params_nested_splits, params_dense_values, indices)
+
+Gathers ragged slices from params axis 0 according to indices.
+"""
+begin
+ begin
+ function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing)
+ local desc
+ tf.with_op_name(name, "RaggedGather") do
+ desc = tf.NodeDescription("RaggedGather")
+ begin
+ begin
+ params_nested_splits_ = [convert(Tensor{Int64}, x) for x = params_nested_splits_]
+ begin
+ end
+ end
+ begin
+ params_dense_values_ = convert(Tensor{Any}, params_dense_values_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ begin
+ (params_dense_values_,) = tf.tf_promote(params_dense_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, params_nested_splits_)
+ end
+ begin
+ tf.add_input(desc, params_dense_values_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if PARAMS_RAGGED_RANK !== nothing
+ desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK)
+ end
+ end
+ begin
+ if OUTPUT_RAGGED_RANK !== nothing
+ desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing)
+ desc = tf.EagerOp("RaggedGather")
+ params_nested_splits_ = convert(tf.EagerTensor, params_nested_splits_)
+ params_dense_values_ = convert(tf.EagerTensor, params_dense_values_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, params_nested_splits_)
+ end
+ begin
+ tf.add_input(desc, params_dense_values_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if PARAMS_RAGGED_RANK !== nothing
+ desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK)
+ end
+ end
+ begin
+ if OUTPUT_RAGGED_RANK !== nothing
+ desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK)
+ end
+ end
+ end
+ begin
+ desc["Tvalues"] = tf.data_type(params_dense_values_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing)
+ if tf.in_eager_mode()
+ ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK)
+ else
+ ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK)
+ end
+ end
+ end
+end
+
+
+"""
+ rgb_to_hsv(images)
+
+Converts one or more images from RGB to HSV.
+"""
+begin
+ begin
+ function rgb_to_hsv_graph(images_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RGBToHSV") do
+ desc = tf.NodeDescription("RGBToHSV")
+ begin
+ begin
+ images_ = convert(Tensor{Float32}, images_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function rgb_to_hsv_eager(images_; name=nothing)
+ desc = tf.EagerOp("RGBToHSV")
+ images_ = convert(tf.EagerTensor, images_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rgb_to_hsv(images_; name=nothing)
+ if tf.in_eager_mode()
+ rgb_to_hsv_eager(images_; name=name)
+ else
+ rgb_to_hsv_graph(images_; name=name)
+ end
+ end
+ end
+end
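+
+# Usage sketch (hypothetical values; assumes graph mode). The last dimension must
+# hold the three RGB channels, with values in [0, 1]:
+#   sess = Session(Graph())
+#   run(sess, rgb_to_hsv(constant(rand(Float32, 8, 8, 3))))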
+
+
+"""
+ multi_device_iterator_to_string_handle(multi_device_iterator)
+
+Produces a string handle for the given MultiDeviceIterator.
+"""
+begin
+ begin
+ function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do
+ desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle")
+ begin
+ begin
+ multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, multi_device_iterator_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing)
+ desc = tf.EagerOp("MultiDeviceIteratorToStringHandle")
+ multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_)
+ begin
+ begin
+ tf.add_input(desc, multi_device_iterator_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing)
+ if tf.in_eager_mode()
+ multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name)
+ else
+ multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ for_(start, limit, delta, input)
+
+Applies body repeatedly, as a for loop from start to limit in steps of delta.
+"""
+begin
+ begin
+ function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing)
+ local desc
+ tf.with_op_name(name, "For") do
+ desc = tf.NodeDescription("For")
+ begin
+ begin
+ start_ = convert(Tensor{Int32}, start_)
+ begin
+ end
+ end
+ begin
+ limit_ = convert(Tensor{Int32}, limit_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Int32}, delta_)
+ begin
+ end
+ end
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, limit_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing)
+ desc = tf.EagerOp("For")
+ start_ = convert(tf.EagerTensor, start_)
+ limit_ = convert(tf.EagerTensor, limit_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, limit_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if T !== nothing
+ desc["T"] = map(Base.identity, T)
+ end
+ end
+ begin
+ if body !== nothing
+ desc["body"] = Base.identity(body)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing)
+ if tf.in_eager_mode()
+ for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body)
+ else
+ for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_reduce_max_sparse(input_indices, input_values, input_shape, reduction_axes; keep_dims=false)
+
+Computes the max of elements across dimensions of a SparseTensor.
+"""
+begin
+ begin
+ function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "SparseReduceMaxSparse") do
+ desc = tf.NodeDescription("SparseReduceMaxSparse")
+ begin
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_values_ = convert(Tensor{Any}, input_values_)
+ begin
+ end
+ end
+ begin
+ input_shape_ = convert(Tensor{Int64}, input_shape_)
+ begin
+ end
+ end
+ begin
+ reduction_axes_ = convert(Tensor{Int32}, reduction_axes_)
+ begin
+ end
+ end
+ begin
+ (input_values_,) = tf.tf_promote(input_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("SparseReduceMaxSparse")
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_values_ = convert(tf.EagerTensor, input_values_)
+ input_shape_ = convert(tf.EagerTensor, input_shape_)
+ reduction_axes_ = convert(tf.EagerTensor, reduction_axes_)
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ begin
+ tf.add_input(desc, reduction_axes_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ else
+ sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
+
+"""
+ concat_offset(concat_dim, shape)
+
+Computes offsets of concat inputs within its output.
+"""
+begin
+ begin
+ function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "ConcatOffset") do
+ desc = tf.NodeDescription("ConcatOffset")
+ begin
+ begin
+ concat_dim_ = convert(Tensor{Int32}, concat_dim_)
+ begin
+ end
+ end
+ begin
+ shape_ = [convert(Tensor{Int32}, x) for x = shape_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, concat_dim_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:N
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing)
+ desc = tf.EagerOp("ConcatOffset")
+ concat_dim_ = convert(tf.EagerTensor, concat_dim_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, concat_dim_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_offset(concat_dim_, shape_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ concat_offset_eager(concat_dim_, shape_; name=name, N=N)
+ else
+ concat_offset_graph(concat_dim_, shape_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ stage(values; capacity=0, memory_limit=0, container=, shared_name=)
+
+Stages values, similar to a lightweight enqueue.
+"""
+begin
+ begin
+ function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "Stage") do
+ desc = tf.NodeDescription("Stage")
+ begin
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("Stage")
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ switch(data, pred)
+
+Forwards data to the output port determined by pred.
+"""
+begin
+ begin
+ function switch_graph(data_, pred_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Switch") do
+ desc = tf.NodeDescription("Switch")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ pred_ = convert(Tensor{Bool}, pred_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, pred_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function switch_eager(data_, pred_; name=nothing)
+ desc = tf.EagerOp("Switch")
+ data_ = convert(tf.EagerTensor, data_)
+ pred_ = convert(tf.EagerTensor, pred_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, pred_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(switch, [data_, pred_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function switch(data_, pred_; name=nothing)
+ if tf.in_eager_mode()
+ switch_eager(data_, pred_; name=name)
+ else
+ switch_graph(data_, pred_; name=name)
+ end
+ end
+ end
+end
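+
+# Usage sketch (hypothetical values; assumes graph mode). switch has two outputs;
+# data flows to the second when pred is true and to the first otherwise, so only
+# the taken branch can be evaluated:
+#   sess = Session(Graph())
+#   out_false, out_true = switch(constant(1.0f0), constant(true))
+#   run(sess, out_true)  # 1.0f0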
+
+
+"""
+ queue_dequeue_many_v2(handle, n; timeout_ms=-1)
+
+Dequeues n tuples of one or more tensors from the given queue.
+"""
+begin
+ begin
+ function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ local desc
+ tf.with_op_name(name, "QueueDequeueManyV2") do
+ desc = tf.NodeDescription("QueueDequeueManyV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ n_ = convert(Tensor{Int32}, n_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ desc = tf.EagerOp("QueueDequeueManyV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ n_ = convert(tf.EagerTensor, n_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, n_)
+ end
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if timeout_ms !== nothing
+ desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+ if tf.in_eager_mode()
+ queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ else
+ queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+ end
+ end
+ end
+end
+
+
+"""
+ segment_prod(data, segment_ids)
+
+Computes the product along segments of a tensor.
+"""
+begin
+ begin
+ function segment_prod_graph(data_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SegmentProd") do
+ desc = tf.NodeDescription("SegmentProd")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function segment_prod_eager(data_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SegmentProd")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_prod(data_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ segment_prod_eager(data_, segment_ids_; name=name)
+ else
+ segment_prod_graph(data_, segment_ids_; name=name)
+ end
+ end
+ end
+end
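+
+# Usage sketch (hypothetical values; assumes graph mode). This wrapper shifts
+# segment_ids down by one, so 1-based Julia ids can be passed directly:
+#   sess = Session(Graph())
+#   run(sess, segment_prod(constant(Float32[1, 2, 3, 4]), constant(Int32[1, 1, 2, 2])))  # [2, 12]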
+
+
+"""
+ approximate_equal(x, y; tolerance=?)
+
+Returns the truth value of abs(x - y) < tolerance element-wise.
+"""
+begin
+ begin
+ function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing)
+ local desc
+ tf.with_op_name(name, "ApproximateEqual") do
+ desc = tf.NodeDescription("ApproximateEqual")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ begin
+ if tolerance !== nothing
+ desc["tolerance"] = Base.identity(tolerance)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing)
+ desc = tf.EagerOp("ApproximateEqual")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ begin
+ if tolerance !== nothing
+ desc["tolerance"] = Base.identity(tolerance)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function approximate_equal(x_, y_; name=nothing, tolerance=nothing)
+ if tf.in_eager_mode()
+ approximate_equal_eager(x_, y_; name=name, tolerance=tolerance)
+ else
+ approximate_equal_graph(x_, y_; name=name, tolerance=tolerance)
+ end
+ end
+ end
+end
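+
+# Usage sketch (hypothetical values; assumes graph mode):
+#   sess = Session(Graph())
+#   eq = approximate_equal(constant(1.0f0), constant(1.0f0 + 1.0f-6), tolerance=1.0f-5)
+#   run(sess, eq)  # true, since |x - y| < tolerance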
+
+
+"""
+ conv2d(input, filter; use_cudnn_on_gpu=true, data_format=, dilations=[1, 1, 1, 1])
+
+Computes a 2-D convolution given 4-D input and filter tensors.
+"""
+begin
+ begin
+ function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv2D") do
+ desc = tf.NodeDescription("Conv2D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_) = tf.tf_promote(input_, filter_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if use_cudnn_on_gpu !== nothing
+ desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv2D")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if use_cudnn_on_gpu !== nothing
+ desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
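+
+# Usage sketch (hypothetical values; assumes graph mode and the default NHWC layout):
+#   sess = Session(Graph())
+#   img  = constant(rand(Float32, 1, 28, 28, 1))  # batch × height × width × channels
+#   kern = constant(rand(Float32, 3, 3, 1, 8))    # kernel height × width × in × out channels
+#   run(sess, conv2d(img, kern, strides=[1, 1, 1, 1], padding="SAME"))  # 1×28×28×8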
+
+
+"""
+ cross_replica_sum(input, group_assignment)
+
+An Op to sum inputs across replicated TPU instances. Each instance supplies its own input.
+"""
+begin
+ begin
+ function cross_replica_sum_graph(input_, group_assignment_; name=nothing)
+ local desc
+ tf.with_op_name(name, "CrossReplicaSum") do
+ desc = tf.NodeDescription("CrossReplicaSum")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ group_assignment_ = convert(Tensor{Int32}, group_assignment_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, group_assignment_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cross_replica_sum_eager(input_, group_assignment_; name=nothing)
+ desc = tf.EagerOp("CrossReplicaSum")
+ input_ = convert(tf.EagerTensor, input_)
+ group_assignment_ = convert(tf.EagerTensor, group_assignment_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, group_assignment_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross_replica_sum(input_, group_assignment_; name=nothing)
+ if tf.in_eager_mode()
+ cross_replica_sum_eager(input_, group_assignment_; name=name)
+ else
+ cross_replica_sum_graph(input_, group_assignment_; name=name)
+ end
+ end
+ end
+end
+
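+# Editorial note: this op is only meaningful on replicated TPU deployments.
+# `group_assignment` is a 2-D Int32 tensor of shape [num_groups, group_size]
+# listing the replica ids whose inputs are summed together. Illustrative
+# (hypothetical) call, summing across replicas 0 and 1:
+#
+#   summed = cross_replica_sum(x, constant(Int32[0 1]))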
+
+"""
+ sparse_mat_mul(a, b; transpose_a=false, transpose_b=false, a_is_sparse=false, b_is_sparse=false)
+
+
+"""
+begin
+ begin
+ function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing)
+ local desc
+ tf.with_op_name(name, "SparseMatMul") do
+ desc = tf.NodeDescription("SparseMatMul")
+ begin
+ begin
+ a_ = convert(Tensor{Float32}, a_)
+ begin
+ end
+ end
+ begin
+ b_ = convert(Tensor{Float32}, b_)
+ begin
+ end
+ end
+ begin
+ (b_,) = tf.tf_promote(b_)
+ end
+ begin
+ (a_,) = tf.tf_promote(a_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ begin
+ if transpose_a !== nothing
+ desc["transpose_a"] = Base.Bool(transpose_a)
+ end
+ end
+ begin
+ if transpose_b !== nothing
+ desc["transpose_b"] = Base.Bool(transpose_b)
+ end
+ end
+ begin
+ if a_is_sparse !== nothing
+ desc["a_is_sparse"] = Base.Bool(a_is_sparse)
+ end
+ end
+ begin
+ if b_is_sparse !== nothing
+ desc["b_is_sparse"] = Base.Bool(b_is_sparse)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing)
+ desc = tf.EagerOp("SparseMatMul")
+ a_ = convert(tf.EagerTensor, a_)
+ b_ = convert(tf.EagerTensor, b_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ begin
+ if transpose_a !== nothing
+ desc["transpose_a"] = Base.Bool(transpose_a)
+ end
+ end
+ begin
+ if transpose_b !== nothing
+ desc["transpose_b"] = Base.Bool(transpose_b)
+ end
+ end
+ begin
+ if a_is_sparse !== nothing
+ desc["a_is_sparse"] = Base.Bool(a_is_sparse)
+ end
+ end
+ begin
+ if b_is_sparse !== nothing
+ desc["b_is_sparse"] = Base.Bool(b_is_sparse)
+ end
+ end
+ end
+ begin
+ desc["Ta"] = tf.data_type(a_)
+ end
+ begin
+ desc["Tb"] = tf.data_type(b_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing)
+ if tf.in_eager_mode()
+ sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse)
+ else
+ sparse_mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse)
+ end
+ end
+ end
+end
+
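+# Editorial sketch: `sparse_mat_mul` multiplies two Float32 matrices, using
+# `a_is_sparse`/`b_is_sparse` as optimization hints that an operand is mostly
+# zeros. A minimal eager-mode use (shapes are illustrative):
+#
+#   a = constant(Float32[1 0 0; 0 2 0])        # 2x3, mostly zeros
+#   b = constant(randn(Float32, 3, 4))
+#   c = sparse_mat_mul(a, b; a_is_sparse=true) # dense 2x4 result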
+
+"""
+ _scoped_allocator_split(concat, split)
+
+Acts roughly like a SplitV Op that splits one tensor into multiple tensors
+"""
+begin
+ begin
+ function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing)
+ local desc
+ tf.with_op_name(name, "_ScopedAllocatorSplit") do
+ desc = tf.NodeDescription("_ScopedAllocatorSplit")
+ begin
+ begin
+ concat_ = convert(Tensor{Any}, concat_)
+ begin
+ end
+ end
+ begin
+ split_ = [convert(Tensor{Any}, x) for x = split_]
+ begin
+ end
+ end
+ begin
+ (concat_, split_) = tf.tf_promote(concat_, split_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, concat_)
+ end
+ begin
+ tf.add_input(desc, split_)
+ end
+ end
+ begin
+ begin
+ if sa_name !== nothing
+ desc["sa_name"] = Base.String(sa_name)
+ end
+ end
+ begin
+ if id !== nothing
+ desc["id"] = Base.Int(id)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:N
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing)
+ desc = tf.EagerOp("_ScopedAllocatorSplit")
+ concat_ = convert(tf.EagerTensor, concat_)
+ split_ = convert(tf.EagerTensor, split_)
+ begin
+ begin
+ tf.add_input(desc, concat_)
+ end
+ begin
+ tf.add_input(desc, split_)
+ end
+ end
+ begin
+ begin
+ if sa_name !== nothing
+ desc["sa_name"] = Base.String(sa_name)
+ end
+ end
+ begin
+ if id !== nothing
+ desc["id"] = Base.Int(id)
+ end
+ end
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(concat_)
+ end
+ begin
+ desc["T"] = tf.data_type(split_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing)
+ if tf.in_eager_mode()
+ _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes)
+ else
+ _scoped_allocator_split_graph(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ igammac(a, x)
+
+
+"""
+begin
+ begin
+ function igammac_graph(a_, x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Igammac") do
+ desc = tf.NodeDescription("Igammac")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
+ end
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (a_, x_) = tf.tf_promote(a_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function igammac_eager(a_, x_; name=nothing)
+ desc = tf.EagerOp("Igammac")
+ a_ = convert(tf.EagerTensor, a_)
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(igammac, [a_, x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igammac(a_, x_; name=nothing)
+ if tf.in_eager_mode()
+ igammac_eager(a_, x_; name=name)
+ else
+ igammac_graph(a_, x_; name=name)
+ end
+ end
+ end
+end
+
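+# Editorial note: `Igammac` computes the upper regularized incomplete gamma
+# function Q(a, x) = 1 - P(a, x), elementwise. For example Q(2, 1) = 2/e:
+#
+#   q = igammac(constant([2.0]), constant([1.0]))  # ≈ [0.7358]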
+
+"""
+ batch_mat_mul(x, y; adj_x=false, adj_y=false)
+
+
+"""
+begin
+ begin
+ function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatMul") do
+ desc = tf.NodeDescription("BatchMatMul")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ begin
+ if adj_x !== nothing
+ desc["adj_x"] = Base.Bool(adj_x)
+ end
+ end
+ begin
+ if adj_y !== nothing
+ desc["adj_y"] = Base.Bool(adj_y)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing)
+ desc = tf.EagerOp("BatchMatMul")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ begin
+ if adj_x !== nothing
+ desc["adj_x"] = Base.Bool(adj_x)
+ end
+ end
+ begin
+ if adj_y !== nothing
+ desc["adj_y"] = Base.Bool(adj_y)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing)
+ if tf.in_eager_mode()
+ batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y)
+ else
+ batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y)
+ end
+ end
+ end
+end
+
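+# Editorial sketch: `batch_mat_mul` multiplies the trailing two dimensions of
+# two stacked-matrix tensors; `adj_x`/`adj_y` adjoint an operand first.
+# Assuming eager mode:
+#
+#   x = constant(randn(4, 2, 3))
+#   y = constant(randn(4, 3, 5))
+#   z = batch_mat_mul(x, y)  # four 2x3 * 3x5 products, shape (4, 2, 5)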
+
+"""
+ tensor_array_pack(handle, flow_in; element_shape=?)
+
+
+"""
+begin
+ begin
+ function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayPack") do
+ desc = tf.NodeDescription("TensorArrayPack")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ desc = tf.EagerOp("TensorArrayPack")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+ if tf.in_eager_mode()
+ tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ else
+ tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ queue_close_v2(handle; cancel_pending_enqueues=false)
+
+
+"""
+begin
+ begin
+ function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ local desc
+ tf.with_op_name(name, "QueueCloseV2") do
+ desc = tf.NodeDescription("QueueCloseV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if cancel_pending_enqueues !== nothing
+ desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ desc = tf.EagerOp("QueueCloseV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ begin
+ if cancel_pending_enqueues !== nothing
+ desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing)
+ if tf.in_eager_mode()
+ queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+ else
+ queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+ end
+ end
+ end
+end
+
+
+"""
+ enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[])
+
+An op that enqueues TPUEmbedding input indices from a SparseTensor.
+"""
+begin
+ begin
+ function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing)
+ local desc
+ tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do
+ desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch")
+ begin
+ begin
+ sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_]
+ begin
+ end
+ end
+ begin
+ embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_]
+ begin
+ end
+ end
+ begin
+ aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_]
+ begin
+ end
+ end
+ begin
+ mode_override_ = convert(Tensor{String}, mode_override_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sample_indices_)
+ end
+ begin
+ tf.add_input(desc, embedding_indices_)
+ end
+ begin
+ tf.add_input(desc, aggregation_weights_)
+ end
+ begin
+ tf.add_input(desc, mode_override_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ begin
+ if combiners !== nothing
+ desc["combiners"] = map(Base.identity, combiners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing)
+ desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch")
+ sample_indices_ = convert(tf.EagerTensor, sample_indices_)
+ embedding_indices_ = convert(tf.EagerTensor, embedding_indices_)
+ aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_)
+ mode_override_ = convert(tf.EagerTensor, mode_override_)
+ begin
+ begin
+ tf.add_input(desc, sample_indices_)
+ end
+ begin
+ tf.add_input(desc, embedding_indices_)
+ end
+ begin
+ tf.add_input(desc, aggregation_weights_)
+ end
+ begin
+ tf.add_input(desc, mode_override_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ begin
+ if combiners !== nothing
+ desc["combiners"] = map(Base.identity, combiners)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing)
+ if tf.in_eager_mode()
+ enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners)
+ else
+ enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_restore_state(reader_handle, state)
+
+
+"""
+begin
+ begin
+ function reader_restore_state_graph(reader_handle_, state_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderRestoreState") do
+ desc = tf.NodeDescription("ReaderRestoreState")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{String}, reader_handle_)
+ begin
+ end
+ end
+ begin
+ state_ = convert(Tensor{String}, state_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, state_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reader_restore_state_eager(reader_handle_, state_; name=nothing)
+ desc = tf.EagerOp("ReaderRestoreState")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ state_ = convert(tf.EagerTensor, state_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, state_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state(reader_handle_, state_; name=nothing)
+ if tf.in_eager_mode()
+ reader_restore_state_eager(reader_handle_, state_; name=name)
+ else
+ reader_restore_state_graph(reader_handle_, state_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ _fused_conv2d(input, filter, args; data_format=, dilations=[1, 1, 1, 1], fused_ops=Int64[], epsilon=?)
+
+*NOTE*: Do not invoke this operator directly in Python. Grappler is expected to create these operators.
+"""
+begin
+ begin
+ function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing)
+ local desc
+ tf.with_op_name(name, "_FusedConv2D") do
+ desc = tf.NodeDescription("_FusedConv2D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ args_ = [convert(Tensor{Any}, x) for x = args_]
+ begin
+ end
+ end
+ begin
+ (input_, filter_, args_) = tf.tf_promote(input_, filter_, args_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if num_args !== nothing
+ desc["num_args"] = Base.Int(num_args)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ begin
+ if fused_ops !== nothing
+ desc["fused_ops"] = map(Base.identity, fused_ops)
+ end
+ end
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing)
+ desc = tf.EagerOp("_FusedConv2D")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ args_ = convert(tf.EagerTensor, args_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, args_)
+ end
+ end
+ begin
+ begin
+ if num_args !== nothing
+ desc["num_args"] = Base.Int(num_args)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ begin
+ if fused_ops !== nothing
+ desc["fused_ops"] = map(Base.identity, fused_ops)
+ end
+ end
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(args_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing)
+ if tf.in_eager_mode()
+ _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon)
+ else
+ _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon)
+ end
+ end
+ end
+end
+
+
+"""
+ _read_variables_op(resources)
+
+
+"""
+begin
+ begin
+ function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing)
+ local desc
+ tf.with_op_name(name, "_ReadVariablesOp") do
+ desc = tf.NodeDescription("_ReadVariablesOp")
+ begin
+ begin
+ resources_ = [convert(Tensor{Any}, x) for x = resources_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resources_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _read_variables_op_eager(resources_; name=nothing, N=nothing, dtypes=nothing)
+ desc = tf.EagerOp("_ReadVariablesOp")
+ resources_ = convert(tf.EagerTensor, resources_)
+ begin
+ begin
+ tf.add_input(desc, resources_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing)
+ if tf.in_eager_mode()
+ _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes)
+ else
+ _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes)
+ end
+ end
+ end
+end
+
+
+"""
+ mutable_hash_table_of_tensors(; container=, shared_name=, use_node_name_sharing=false, value_shape=?)
+
+
+"""
+begin
+ begin
+ function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing)
+ local desc
+ tf.with_op_name(name, "MutableHashTableOfTensors") do
+ desc = tf.NodeDescription("MutableHashTableOfTensors")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutable_hash_table_of_tensors_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing)
+ desc = tf.EagerOp("MutableHashTableOfTensors")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing)
+ if tf.in_eager_mode()
+ mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape)
+ else
+ mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ read_file(filename)
+
+
+"""
+begin
+ begin
+ function read_file_graph(filename_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReadFile") do
+ desc = tf.NodeDescription("ReadFile")
+ begin
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function read_file_eager(filename_; name=nothing)
+ desc = tf.EagerOp("ReadFile")
+ filename_ = convert(tf.EagerTensor, filename_)
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(read_file, [filename_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_file(filename_; name=nothing)
+ if tf.in_eager_mode()
+ read_file_eager(filename_; name=name)
+ else
+ read_file_graph(filename_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_mdl_adagrad_light_parameters(parameters, accumulators, weights, benefits; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ weights_ = convert(Tensor{Float32}, weights_)
+ begin
+ end
+ end
+ begin
+ benefits_ = convert(Tensor{Float32}, benefits_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, weights_)
+ end
+ begin
+ tf.add_input(desc, benefits_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ weights_ = convert(tf.EagerTensor, weights_)
+ benefits_ = convert(tf.EagerTensor, benefits_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, weights_)
+ end
+ begin
+ tf.add_input(desc, benefits_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence; overlapping=false)
+
+
+"""
+begin
+ begin
+ function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+ local desc
+ tf.with_op_name(name, "FractionalAvgPoolGrad") do
+ desc = tf.NodeDescription("FractionalAvgPoolGrad")
+ begin
+ begin
+ orig_input_tensor_shape_ = convert(Tensor{Int64}, orig_input_tensor_shape_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_)
+ begin
+ end
+ end
+ begin
+ col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_)
+ begin
+ end
+ end
+ begin
+ (out_backprop_,) = tf.tf_promote(out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_tensor_shape_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ begin
+ tf.add_input(desc, row_pooling_sequence_)
+ end
+ begin
+ tf.add_input(desc, col_pooling_sequence_)
+ end
+ end
+ begin
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+ desc = tf.EagerOp("FractionalAvgPoolGrad")
+ orig_input_tensor_shape_ = convert(tf.EagerTensor, orig_input_tensor_shape_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_)
+ col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_tensor_shape_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ begin
+ tf.add_input(desc, row_pooling_sequence_)
+ end
+ begin
+ tf.add_input(desc, col_pooling_sequence_)
+ end
+ end
+ begin
+ begin
+ if overlapping !== nothing
+ desc["overlapping"] = Base.Bool(overlapping)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+ if tf.in_eager_mode()
+ fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+ else
+ fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ bincount(arr, size, weights)
+
+
+"""
+begin
+ begin
+ function bincount_graph(arr_, size_, weights_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Bincount") do
+ desc = tf.NodeDescription("Bincount")
+ begin
+ begin
+ arr_ = convert(Tensor{Int32}, arr_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ weights_ = convert(Tensor{Any}, weights_)
+ begin
+ end
+ end
+ begin
+ (weights_,) = tf.tf_promote(weights_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, arr_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, weights_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bincount_eager(arr_, size_, weights_; name=nothing)
+ desc = tf.EagerOp("Bincount")
+ arr_ = convert(tf.EagerTensor, arr_)
+ size_ = convert(tf.EagerTensor, size_)
+ weights_ = convert(tf.EagerTensor, weights_)
+ begin
+ begin
+ tf.add_input(desc, arr_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ begin
+ tf.add_input(desc, weights_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(weights_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bincount(arr_, size_, weights_; name=nothing)
+ if tf.in_eager_mode()
+ bincount_eager(arr_, size_, weights_; name=name)
+ else
+ bincount_graph(arr_, size_, weights_; name=name)
+ end
+ end
+ end
+end
+
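+# Editorial note: `bincount` counts how often each value in 0:(size-1) occurs
+# in `arr`; no 1-based index adjustment is applied in this wrapper, so values
+# are raw TF bins. If `weights` is non-empty, per-bin sums of weights are
+# returned instead of counts. Sketch (the empty-weights call is an assumption):
+#
+#   counts = bincount(constant(Int32[0, 1, 1, 2]), constant(Int32(3)),
+#                     constant(Float64[]))  # -> [1.0, 2.0, 1.0]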
+
+"""
+ inv(x)
+
+
+"""
+begin
+ begin
+ function inv_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Inv") do
+ desc = tf.NodeDescription("Inv")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function inv_eager(x_; name=nothing)
+ desc = tf.EagerOp("Inv")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(inv, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv(x_; name=nothing)
+ if tf.in_eager_mode()
+ inv_eager(x_; name=name)
+ else
+ inv_graph(x_; name=name)
+ end
+ end
+ end
+end
+
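+# Editorial note: the `Inv` op is the elementwise reciprocal y = 1 / x (the
+# older name of TF's `Reciprocal`), not a matrix inverse:
+#
+#   y = inv(constant([2.0, 4.0]))  # [0.5, 0.25]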
+
+"""
+ apply_proximal_adagrad(var, accum, lr, l1, l2, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyProximalAdagrad") do
+ desc = tf.NodeDescription("ApplyProximalAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyProximalAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
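+# Editorial note: per the upstream op documentation, ApplyProximalAdagrad
+# updates `var` in place with the proximal Adagrad rule:
+#
+#   accum += grad .^ 2
+#   prox_v = var .- lr .* grad ./ sqrt.(accum)
+#   var    = sign.(prox_v) ./ (1 .+ lr .* l2) .* max.(abs.(prox_v) .- lr .* l1, 0)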
+
+"""
+ gather_v2(params, indices, axis)
+
+
+"""
+begin
+ begin
+ function gather_v2_graph(params_, indices_, axis_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GatherV2") do
+ desc = tf.NodeDescription("GatherV2")
+ begin
+ begin
+ params_ = convert(Tensor{Any}, params_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Any}, axis_)
+ begin
+ end
+ end
+ begin
+ (params_,) = tf.tf_promote(params_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function gather_v2_eager(params_, indices_, axis_; name=nothing)
+ desc = tf.EagerOp("GatherV2")
+ params_ = convert(tf.EagerTensor, params_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, params_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tparams"] = tf.data_type(params_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["Taxis"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_v2(params_, indices_, axis_; name=nothing)
+ if tf.in_eager_mode()
+ gather_v2_eager(params_, indices_, axis_; name=name)
+ else
+ gather_v2_graph(params_, indices_, axis_; name=name)
+ end
+ end
+ end
+end
+
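+# Editorial note, grounded in the code above: the graph wrapper subtracts 1
+# from `indices`, so callers pass 1-based Julia indices, but `axis` is passed
+# through unadjusted (0-based, as in TF), and the eager wrapper applies no
+# index adjustment at all. The same asymmetry appears in `resource_gather`
+# below. Graph-mode sketch:
+#
+#   params = constant([10.0, 20.0, 30.0])
+#   gather_v2(params, constant([3, 1]), constant(0))  # -> [30.0, 10.0]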
+
+"""
+ write_file(filename, contents)
+
+
+"""
+begin
+ begin
+ function write_file_graph(filename_, contents_; name=nothing)
+ local desc
+ tf.with_op_name(name, "WriteFile") do
+ desc = tf.NodeDescription("WriteFile")
+ begin
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ begin
+ contents_ = convert(Tensor{String}, contents_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_file_eager(filename_, contents_; name=nothing)
+ desc = tf.EagerOp("WriteFile")
+ filename_ = convert(tf.EagerTensor, filename_)
+ contents_ = convert(tf.EagerTensor, contents_)
+ begin
+ begin
+ tf.add_input(desc, filename_)
+ end
+ begin
+ tf.add_input(desc, contents_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(write_file, [filename_, contents_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_file(filename_, contents_; name=nothing)
+ if tf.in_eager_mode()
+ write_file_eager(filename_, contents_; name=name)
+ else
+ write_file_graph(filename_, contents_; name=name)
+ end
+ end
+ end
+end
+
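+# Editorial sketch: `write_file` and `read_file` (defined earlier in this
+# file) round-trip whole files through string tensors. Eager-mode example
+# with a hypothetical path:
+#
+#   write_file(constant("/tmp/demo.txt"), constant("hello"))
+#   s = read_file(constant("/tmp/demo.txt"))  # string tensor "hello"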
+
+"""
+ boosted_trees_get_ensemble_states(tree_ensemble_handle)
+
+
+"""
+begin
+ begin
+ function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do
+ desc = tf.NodeDescription("BoostedTreesGetEnsembleStates")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing)
+ desc = tf.EagerOp("BoostedTreesGetEnsembleStates")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name)
+ else
+ boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_gather(resource, indices; validate_indices=true)
+
+
+"""
+begin
+ begin
+ function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceGather") do
+ desc = tf.NodeDescription("ResourceGather")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceGather")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if validate_indices !== nothing
+ desc["validate_indices"] = Base.Bool(validate_indices)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_gather, [resource_, indices_], name=nothing, validate_indices=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype)
+ else
+ resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_proximal_gradient_descent(var, alpha, l1, l2, delta; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do
+ desc = tf.NodeDescription("ResourceApplyProximalGradientDescent")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Any}, delta_)
+ begin
+ end
+ end
+ begin
+ (alpha_, l1_, l2_, delta_) = tf.tf_promote(alpha_, l1_, l2_, delta_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyProximalGradientDescent")
+ var_ = convert(tf.EagerTensor, var_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(delta_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking)
+ else
+ resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
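+# Editorial note: per the upstream op documentation, this applies the proximal
+# gradient descent update to the resource variable `var`:
+#
+#   prox_v = var .- alpha .* delta
+#   var    = sign.(prox_v) ./ (1 .+ alpha .* l2) .* max.(abs.(prox_v) .- alpha .* l1, 0)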
+
+"""
+ truncate_mod(x, y)
+
+
+"""
+begin
+ begin
+ function truncate_mod_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TruncateMod") do
+ desc = tf.NodeDescription("TruncateMod")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function truncate_mod_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("TruncateMod")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_mod(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ truncate_mod_eager(x_, y_; name=name)
+ else
+ truncate_mod_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
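+# Editorial note: TruncateMod follows C-style fmod semantics, so the result
+# takes the sign of the dividend (unlike floored modulo):
+#
+#   r = truncate_mod(constant(-7), constant(3))  # -1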
+
+"""
+ log_matrix_determinant(input)
+
+
+"""
+begin
+ begin
+ function log_matrix_determinant_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LogMatrixDeterminant") do
+ desc = tf.NodeDescription("LogMatrixDeterminant")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function log_matrix_determinant_eager(input_; name=nothing)
+ desc = tf.EagerOp("LogMatrixDeterminant")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_matrix_determinant(input_; name=nothing)
+ if tf.in_eager_mode()
+ log_matrix_determinant_eager(input_; name=name)
+ else
+ log_matrix_determinant_graph(input_; name=name)
+ end
+ end
+ end
+end
+
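+# Editorial note: LogMatrixDeterminant has two outputs, so both paths return a
+# (sign, log|det|) pair; note the eager path returns the whole `res` rather
+# than `res[1]`. Sketch:
+#
+#   sign, logabsdet = log_matrix_determinant(constant([2.0 0.0; 0.0 3.0]))
+#   # sign == 1.0, logabsdet == log(6.0)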
+
+"""
+ irfft2d(input, fft_length)
+
+
+"""
+begin
+ begin
+ function irfft2d_graph(input_, fft_length_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IRFFT2D") do
+ desc = tf.NodeDescription("IRFFT2D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ begin
+ fft_length_ = convert(Tensor{Int32}, fft_length_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function irfft2d_eager(input_, fft_length_; name=nothing)
+ desc = tf.EagerOp("IRFFT2D")
+ input_ = convert(tf.EagerTensor, input_)
+ fft_length_ = convert(tf.EagerTensor, fft_length_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, fft_length_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft2d(input_, fft_length_; name=nothing)
+ if tf.in_eager_mode()
+ irfft2d_eager(input_, fft_length_; name=name)
+ else
+ irfft2d_graph(input_, fft_length_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_training_predict(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features)
+
+Runs the tree ensemble predictors on the input instances during training,
+resuming from the cached tree and node ids, and returns the partial logit
+updates together with the new cached tree and node ids.
+
+"""
+begin
+ begin
+ function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesTrainingPredict") do
+ desc = tf.NodeDescription("BoostedTreesTrainingPredict")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ cached_tree_ids_ = convert(Tensor{Int32}, cached_tree_ids_)
+ begin
+ end
+ end
+ begin
+ cached_node_ids_ = convert(Tensor{Int32}, cached_node_ids_)
+ begin
+ end
+ end
+ begin
+ bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, cached_tree_ids_)
+ end
+ begin
+ tf.add_input(desc, cached_node_ids_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_)
+ end
+ end
+ begin
+ begin
+ if num_bucketized_features !== nothing
+ desc["num_bucketized_features"] = Base.Int(num_bucketized_features)
+ end
+ end
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ desc = tf.EagerOp("BoostedTreesTrainingPredict")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ cached_tree_ids_ = convert(tf.EagerTensor, cached_tree_ids_)
+ cached_node_ids_ = convert(tf.EagerTensor, cached_node_ids_)
+ bucketized_features_ = convert(tf.EagerTensor, bucketized_features_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, cached_tree_ids_)
+ end
+ begin
+ tf.add_input(desc, cached_node_ids_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_)
+ end
+ end
+ begin
+ begin
+ if num_bucketized_features !== nothing
+ desc["num_bucketized_features"] = Base.Int(num_bucketized_features)
+ end
+ end
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension)
+ else
+ boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension)
+ end
+ end
+ end
+end
+
+
+"""
+ floor(x)
+
+Returns the element-wise largest integer not greater than `x`.
+
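+A minimal eager-mode sketch (assuming `enable_eager_execution()` has been
+called):
+
+    floor(constant([1.7, -0.3, 2.0]))  # [1.0, -1.0, 2.0]
+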
+"""
+begin
+ begin
+ function floor_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Floor") do
+ desc = tf.NodeDescription("Floor")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function floor_eager(x_; name=nothing)
+ desc = tf.EagerOp("Floor")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(floor, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor(x_; name=nothing)
+ if tf.in_eager_mode()
+ floor_eager(x_; name=name)
+ else
+ floor_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ write_image_summary(writer, step, tag, tensor, bad_color; max_images=3)
+
+Writes an image summary of `tensor` to the summary writer `writer`, recorded
+at step `step` under tag `tag`. Pixels with non-finite values are rendered in
+`bad_color`, and at most `max_images` images are emitted per call.
+
+"""
+begin
+ begin
+ function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing)
+ local desc
+ tf.with_op_name(name, "WriteImageSummary") do
+ desc = tf.NodeDescription("WriteImageSummary")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ begin
+ step_ = convert(Tensor{Int64}, step_)
+ begin
+ end
+ end
+ begin
+ tag_ = convert(Tensor{String}, tag_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Float32}, tensor_)
+ begin
+ end
+ end
+ begin
+ bad_color_ = convert(Tensor{UInt8}, bad_color_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, bad_color_)
+ end
+ end
+ begin
+ begin
+ if max_images !== nothing
+ desc["max_images"] = Base.Int(max_images)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing)
+ desc = tf.EagerOp("WriteImageSummary")
+ writer_ = convert(tf.EagerTensor, writer_)
+ step_ = convert(tf.EagerTensor, step_)
+ tag_ = convert(tf.EagerTensor, tag_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ bad_color_ = convert(tf.EagerTensor, bad_color_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ begin
+ tf.add_input(desc, step_)
+ end
+ begin
+ tf.add_input(desc, tag_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, bad_color_)
+ end
+ end
+ begin
+ begin
+ if max_images !== nothing
+ desc["max_images"] = Base.Int(max_images)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing)
+ if tf.in_eager_mode()
+ write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images)
+ else
+ write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images)
+ end
+ end
+ end
+end
+
+
+"""
+ tile_grad(input, multiples)
+
+Returns the gradient of `tile`: each repeated tile of the incoming gradient is
+summed back into the corresponding slice of the input.
+
+"""
+begin
+ begin
+ function tile_grad_graph(input_, multiples_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TileGrad") do
+ desc = tf.NodeDescription("TileGrad")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ multiples_ = convert(Tensor{Int32}, multiples_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, multiples_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tile_grad_eager(input_, multiples_; name=nothing)
+ desc = tf.EagerOp("TileGrad")
+ input_ = convert(tf.EagerTensor, input_)
+ multiples_ = convert(tf.EagerTensor, multiples_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, multiples_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile_grad(input_, multiples_; name=nothing)
+ if tf.in_eager_mode()
+ tile_grad_eager(input_, multiples_; name=name)
+ else
+ tile_grad_graph(input_, multiples_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulators_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_grad_v3(handle, flow_in)
+
+Creates a TensorArray holding the gradients of the values in `handle`, keyed
+by the string `source`; returns the pair `(grad_handle, flow_out)`.
+
+"""
+begin
+ begin
+ function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGradV3") do
+ desc = tf.NodeDescription("TensorArrayGradV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_array_grad_v3_eager(handle_, flow_in_; name=nothing, source=nothing)
+ desc = tf.EagerOp("TensorArrayGradV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing)
+ if tf.in_eager_mode()
+ tensor_array_grad_v3_eager(handle_, flow_in_; name=name, source=source)
+ else
+ tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source)
+ end
+ end
+ end
+end
+
+
+"""
+ enqueue_tpu_embedding_integer_batch(batch, mode_override; device_ordinal=-1)
+
+An op that enqueues a list of input batch tensors to TPUEmbedding.
+"""
+begin
+ begin
+ function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing)
+ local desc
+ tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do
+ desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch")
+ begin
+ begin
+ batch_ = [convert(Tensor{Int32}, x) for x = batch_]
+ begin
+ end
+ end
+ begin
+ mode_override_ = convert(Tensor{String}, mode_override_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, batch_)
+ end
+ begin
+ tf.add_input(desc, mode_override_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing)
+ desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch")
+ batch_ = convert(tf.EagerTensor, batch_)
+ mode_override_ = convert(tf.EagerTensor, mode_override_)
+ begin
+ begin
+ tf.add_input(desc, batch_)
+ end
+ begin
+ tf.add_input(desc, mode_override_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if device_ordinal !== nothing
+ desc["device_ordinal"] = Base.Int(device_ordinal)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing)
+ if tf.in_eager_mode()
+ enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal)
+ else
+ enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal)
+ end
+ end
+ end
+end
+
+
+"""
+ fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true)
+
+Batch normalization fused into a single kernel. Returns five tensors: the
+normalized `y`, the batch mean, the batch variance, and two reserve outputs
+that are reused by the gradient kernel. `data_format` selects `"NHWC"` or
+`"NCHW"`; with `is_training=false` the supplied `mean` and `variance` are used
+instead of statistics computed from the batch.
+
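+A minimal eager-mode sketch (assuming `enable_eager_execution()` has been
+called; shapes and values are illustrative):
+
+    x = constant(randn(Float32, 1, 4, 4, 3))   # NHWC input
+    scale = constant(ones(Float32, 3))
+    offset = constant(zeros(Float32, 3))
+    mean = constant(zeros(Float32, 3))
+    variance = constant(ones(Float32, 3))
+    y, _, _, _, _ = fused_batch_norm(x, scale, offset, mean, variance;
+                                     epsilon=1f-3, is_training=false)
+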
+"""
+begin
+ begin
+ function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ local desc
+ tf.with_op_name(name, "FusedBatchNorm") do
+ desc = tf.NodeDescription("FusedBatchNorm")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ scale_ = convert(Tensor{Any}, scale_)
+ begin
+ end
+ end
+ begin
+ offset_ = convert(Tensor{Any}, offset_)
+ begin
+ end
+ end
+ begin
+ mean_ = convert(Tensor{Any}, mean_)
+ begin
+ end
+ end
+ begin
+ variance_ = convert(Tensor{Any}, variance_)
+ begin
+ end
+ end
+ begin
+ (x_, scale_, offset_, mean_, variance_) = tf.tf_promote(x_, scale_, offset_, mean_, variance_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, offset_)
+ end
+ begin
+ tf.add_input(desc, mean_)
+ end
+ begin
+ tf.add_input(desc, variance_)
+ end
+ end
+ begin
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:5
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ desc = tf.EagerOp("FusedBatchNorm")
+ x_ = convert(tf.EagerTensor, x_)
+ scale_ = convert(tf.EagerTensor, scale_)
+ offset_ = convert(tf.EagerTensor, offset_)
+ mean_ = convert(tf.EagerTensor, mean_)
+ variance_ = convert(tf.EagerTensor, variance_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, scale_)
+ end
+ begin
+ tf.add_input(desc, offset_)
+ end
+ begin
+ tf.add_input(desc, mean_)
+ end
+ begin
+ tf.add_input(desc, variance_)
+ end
+ end
+ begin
+ begin
+ if epsilon !== nothing
+ desc["epsilon"] = Base.identity(epsilon)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if is_training !== nothing
+ desc["is_training"] = Base.Bool(is_training)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(scale_)
+ end
+ begin
+ desc["T"] = tf.data_type(offset_)
+ end
+ begin
+ desc["T"] = tf.data_type(mean_)
+ end
+ begin
+ desc["T"] = tf.data_type(variance_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+ if tf.in_eager_mode()
+ fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ else
+ fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training)
+ end
+ end
+ end
+end
+
+
+"""
+ logical_and(x, y)
+
+Returns the element-wise truth value of `x AND y`.
+
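+A minimal eager-mode sketch (assuming `enable_eager_execution()` has been
+called):
+
+    logical_and(constant([true, true, false]), constant([true, false, false]))
+    # [true, false, false]
+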
+"""
+begin
+ begin
+ function logical_and_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LogicalAnd") do
+ desc = tf.NodeDescription("LogicalAnd")
+ begin
+ begin
+ x_ = convert(Tensor{Bool}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Bool}, y_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function logical_and_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("LogicalAnd")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(logical_and, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_and(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ logical_and_eager(x_, y_; name=name)
+ else
+ logical_and_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_scatter_update(tensor, indices, updates)
+
+Returns a copy of `tensor` in which the slices addressed by `indices` are
+replaced by `updates`. In graph mode the wrapper shifts `indices` from Julia's
+1-based convention to TensorFlow's 0-based one; the eager wrapper passes them
+through unchanged.
+
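+A graph-mode sketch (indices follow Julia's 1-based convention there; the
+values shown are what evaluating the result in a session yields):
+
+    t = constant(zeros(4))
+    idx = constant(reshape(Int32[1, 3], 2, 1))   # rows 1 and 3
+    u = constant([5.0, 7.0])
+    tensor_scatter_update(t, idx, u)             # [5.0, 0.0, 7.0, 0.0]
+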
+"""
+begin
+ begin
+ function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorScatterUpdate") do
+ desc = tf.NodeDescription("TensorScatterUpdate")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (tensor_, updates_) = tf.tf_promote(tensor_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing)
+ desc = tf.EagerOp("TensorScatterUpdate")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_update(tensor_, indices_, updates_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_scatter_update_eager(tensor_, indices_, updates_; name=name)
+ else
+ tensor_scatter_update_graph(tensor_, indices_, updates_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ text_line_reader_v2(; skip_header_lines=0, container=, shared_name=)
+
+Returns the handle of a reader that outputs the lines of a file delimited by
+newlines, skipping the first `skip_header_lines` lines of every file.
+
+"""
+begin
+ begin
+ function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "TextLineReaderV2") do
+ desc = tf.NodeDescription("TextLineReaderV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if skip_header_lines !== nothing
+ desc["skip_header_lines"] = Base.Int(skip_header_lines)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function text_line_reader_v2_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("TextLineReaderV2")
+ begin
+ end
+ begin
+ begin
+ if skip_header_lines !== nothing
+ desc["skip_header_lines"] = Base.Int(skip_header_lines)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
+ else
+ text_line_reader_v2_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_slice_dataset(components)
+
+Creates a dataset that emits each slice of `components` along the first
+dimension as one element.
+
+"""
+begin
+ begin
+ function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "TensorSliceDataset") do
+ desc = tf.NodeDescription("TensorSliceDataset")
+ begin
+ begin
+ components_ = [convert(Tensor{Any}, x) for x = components_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("TensorSliceDataset")
+ components_ = convert(tf.EagerTensor, components_)
+ begin
+ begin
+ tf.add_input(desc, components_)
+ end
+ end
+ begin
+ begin
+ if Toutput_types !== nothing
+ desc["Toutput_types"] = map(Base.identity, Toutput_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes)
+ else
+ tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_scatter_v3(handle, indices, value, flow_in)
+
+Scatters the rows of `value` into the TensorArray `handle`, writing row `i` to
+the element selected by `indices[i]`; returns a `flow_out` scalar for chaining
+subsequent TensorArray operations.
+
+"""
+begin
+ begin
+ function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayScatterV3") do
+ desc = tf.NodeDescription("TensorArrayScatterV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayScatterV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=name)
+ else
+ tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ resize_nearest_neighbor_grad(grads, size; align_corners=false)
+
+Computes the gradient of nearest-neighbor image resizing, mapping `grads`
+(backpropagated from the resized image) back onto an input of spatial size
+`size`.
+
+"""
+begin
+ begin
+ function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeNearestNeighborGrad") do
+ desc = tf.NodeDescription("ResizeNearestNeighborGrad")
+ begin
+ begin
+ grads_ = convert(Tensor{Any}, grads_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ (grads_,) = tf.tf_promote(grads_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeNearestNeighborGrad")
+ grads_ = convert(tf.EagerTensor, grads_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grads_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners)
+ else
+ resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
+
+
+"""
+ apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad; use_locking=false)
+
+Updates `var` and the moving average `m` according to the PowerSign optimizer
+rule.
+
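+The update rule implemented by the kernel is, in pseudocode:
+
+    m   <- beta * m + (1 - beta) * grad
+    var <- var - lr * exp(logbase * sign_decay * sign(grad) * sign(m)) * grad
+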
+"""
+begin
+ begin
+ function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyPowerSign") do
+ desc = tf.NodeDescription("ApplyPowerSign")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ logbase_ = convert(Tensor{Any}, logbase_)
+ begin
+ end
+ end
+ begin
+ sign_decay_ = convert(Tensor{Any}, sign_decay_)
+ begin
+ end
+ end
+ begin
+ beta_ = convert(Tensor{Any}, beta_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, logbase_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ApplyPowerSign")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ logbase_ = convert(tf.EagerTensor, logbase_)
+ sign_decay_ = convert(tf.EagerTensor, sign_decay_)
+ beta_ = convert(tf.EagerTensor, beta_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, logbase_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(m_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(logbase_)
+ end
+ begin
+ desc["T"] = tf.data_type(sign_decay_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ else
+ apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ mirror_pad(input, paddings)
+
+Pads `input` with mirrored values. `paddings` is an `[n, 2]` integer matrix
+whose row `i` gives the amount of padding before and after dimension `i`;
+`mode` is `"REFLECT"` (border values are not repeated) or `"SYMMETRIC"` (they
+are).
+
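+A minimal eager-mode sketch (assuming `enable_eager_execution()` has been
+called):
+
+    x = constant([1 2 3; 4 5 6])
+    p = constant(Int32[1 1; 1 1])
+    mirror_pad(x, p; mode="REFLECT")   # 4x5 result, mirrored without repeating the border
+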
+"""
+begin
+ begin
+ function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing)
+ local desc
+ tf.with_op_name(name, "MirrorPad") do
+ desc = tf.NodeDescription("MirrorPad")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (paddings_,) = tf.tf_promote(paddings_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing)
+ desc = tf.EagerOp("MirrorPad")
+ input_ = convert(tf.EagerTensor, input_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tpaddings"] = tf.data_type(paddings_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad(input_, paddings_; name=nothing, mode=nothing)
+ if tf.in_eager_mode()
+ mirror_pad_eager(input_, paddings_; name=name, mode=mode)
+ else
+ mirror_pad_graph(input_, paddings_; name=name, mode=mode)
+ end
+ end
+ end
+end
+
+
+"""
+ logical_not(x)
+
+Returns the element-wise truth value of `NOT x`.
+
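+A minimal eager-mode sketch (assuming `enable_eager_execution()` has been
+called):
+
+    logical_not(constant([true, false]))  # [false, true]
+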
+"""
+begin
+ begin
+ function logical_not_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LogicalNot") do
+ desc = tf.NodeDescription("LogicalNot")
+ begin
+ begin
+ x_ = convert(Tensor{Bool}, x_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function logical_not_eager(x_; name=nothing)
+ desc = tf.EagerOp("LogicalNot")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(logical_not, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_not(x_; name=nothing)
+ if tf.in_eager_mode()
+ logical_not_eager(x_; name=name)
+ else
+ logical_not_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_ifft(input)
+
+Deprecated batched alias for the inverse FFT: applies an inverse 1D discrete
+Fourier transform over the inner-most dimension of a batch of complex tensors.
+
+"""
+begin
+ begin
+ function batch_ifft_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchIFFT") do
+ desc = tf.NodeDescription("BatchIFFT")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_ifft_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchIFFT")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_ifft, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_ifft_eager(input_; name=name)
+ else
+ batch_ifft_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_concat_v2(handle, flow_in; element_shape_except0=?)
+
+Concatenates the elements of the TensorArray `handle` along their first
+dimension; returns the concatenated value together with the length of each
+element's first dimension.
+
+"""
+begin
+ begin
+ function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayConcatV2") do
+ desc = tf.NodeDescription("TensorArrayConcatV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape_except0 !== nothing
+ desc["element_shape_except0"] = Base.identity(element_shape_except0)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ desc = tf.EagerOp("TensorArrayConcatV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape_except0 !== nothing
+ desc["element_shape_except0"] = Base.identity(element_shape_except0)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ if tf.in_eager_mode()
+ tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0)
+ else
+ tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0)
+ end
+ end
+ end
+end
+
+
+"""
+ sum(input, reduction_indices; keep_dims=false)
+
+Computes the sum of the elements of `input` across the dimensions given in
+`reduction_indices`; with `keep_dims=true` the reduced dimensions are retained
+with size 1. In graph mode the wrapper shifts `reduction_indices` from Julia's
+1-based convention to TensorFlow's 0-based one; the eager wrapper passes them
+through unchanged.
+
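+A graph-mode sketch (`reduction_indices` follows Julia's 1-based convention
+there; the values shown are what evaluating the result in a session yields):
+
+    x = constant([1 2; 3 4])
+    sum(x, 1)   # collapses the first dimension: [4, 6]
+    sum(x, 2)   # collapses the second dimension: [3, 7]
+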
+"""
+begin
+ begin
+ function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Sum") do
+ desc = tf.NodeDescription("Sum")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("Sum")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_predict(tree_ensemble_handle, bucketized_features)
+
+Runs the tree ensemble predictors on the given bucketized features and returns
+the aggregated prediction logits.
+
+"""
+begin
+ begin
+ function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesPredict") do
+ desc = tf.NodeDescription("BoostedTreesPredict")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_)
+ end
+ end
+ begin
+ begin
+ if num_bucketized_features !== nothing
+ desc["num_bucketized_features"] = Base.Int(num_bucketized_features)
+ end
+ end
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ desc = tf.EagerOp("BoostedTreesPredict")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ bucketized_features_ = convert(tf.EagerTensor, bucketized_features_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, bucketized_features_)
+ end
+ end
+ begin
+ begin
+ if num_bucketized_features !== nothing
+ desc["num_bucketized_features"] = Base.Int(num_bucketized_features)
+ end
+ end
+ begin
+ if logits_dimension !== nothing
+ desc["logits_dimension"] = Base.Int(logits_dimension)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_predict, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension)
+ else
+ boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_adagrad(var, accum, lr, grad, indices; use_locking=false, update_slots=true)
+
+Applies the Adagrad update to the rows of the resource variables `var` and
+`accum` selected by `indices` (1-based in graph mode, where the wrapper shifts
+them to TensorFlow's 0-based convention).
+
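+For each selected row the kernel applies, in pseudocode (with
+`update_slots=false` the accumulator is left untouched):
+
+    accum += grad * grad
+    var   -= lr * grad / sqrt(accum)
+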
+"""
+begin
+ begin
+ function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyAdagrad") do
+ desc = tf.NodeDescription("ResourceSparseApplyAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (lr_, grad_) = tf.tf_promote(lr_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots)
+ else
+ resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots)
+ end
+ end
+ end
+end
+
+
+"""
+ leaky_relu_grad(gradients, features; alpha=?)
+
+Computes the gradients of the leaky-ReLU activation, propagating `gradients`
+where `features` is positive and scaling them by `alpha` elsewhere.
+
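+Element-wise, the backpropagated value is, in pseudocode:
+
+    backprop = gradient * (feature > 0 ? 1 : alpha)
+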
+"""
+begin
+ begin
+ function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing)
+ local desc
+ tf.with_op_name(name, "LeakyReluGrad") do
+ desc = tf.NodeDescription("LeakyReluGrad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Float32}, gradients_)
+ begin
+ end
+ end
+ begin
+ features_ = convert(Tensor{Float32}, features_)
+ begin
+ end
+ end
+ begin
+ (gradients_, features_) = tf.tf_promote(gradients_, features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing)
+ desc = tf.EagerOp("LeakyReluGrad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ begin
+ if alpha !== nothing
+ desc["alpha"] = Base.identity(alpha)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing)
+ if tf.in_eager_mode()
+ leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha)
+ else
+ leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha)
+ end
+ end
+ end
+end
+
+
+"""
+ _device_retval(input)
+
+A graph node which represents a return value of a function.
+"""
+begin
+ begin
+ function _device_retval_graph(input_; name=nothing, index=nothing)
+ local desc
+ tf.with_op_name(name, "_DeviceRetval") do
+ desc = tf.NodeDescription("_DeviceRetval")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _device_retval_eager(input_; name=nothing, index=nothing)
+ desc = tf.EagerOp("_DeviceRetval")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if index !== nothing
+ desc["index"] = Base.Int(index)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_retval(input_; name=nothing, index=nothing)
+ if tf.in_eager_mode()
+ _device_retval_eager(input_; name=name, index=index)
+ else
+ _device_retval_graph(input_; name=name, index=index)
+ end
+ end
+ end
+end
+
+
+"""
+ pad(input, paddings)
+
+Pads `input` with zeros. `paddings` is an `[n, 2]` integer matrix whose row
+`i` gives the number of zeros to add before and after dimension `i`.
+
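+A minimal eager-mode sketch (assuming `enable_eager_execution()` has been
+called):
+
+    x = constant([1 2; 3 4])
+    p = constant(Int32[1 1; 2 2])   # 1 row before/after, 2 columns before/after
+    pad(x, p)                       # 4x6 result with a zero border
+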
+"""
+begin
+ begin
+ function pad_graph(input_, paddings_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Pad") do
+ desc = tf.NodeDescription("Pad")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (paddings_,) = tf.tf_promote(paddings_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function pad_eager(input_, paddings_; name=nothing)
+ desc = tf.EagerOp("Pad")
+ input_ = convert(tf.EagerTensor, input_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tpaddings"] = tf.data_type(paddings_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(pad, [input_, paddings_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad(input_, paddings_; name=nothing)
+ if tf.in_eager_mode()
+ pad_eager(input_, paddings_; name=name)
+ else
+ pad_graph(input_, paddings_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ add_many_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape; container=, shared_name=)
+
+Adds an `N`-minibatch `SparseTensor` (given by `sparse_indices`,
+`sparse_values`, and `sparse_shape`) to a `SparseTensorsMap`, one entry per
+row, and returns the `N` handles.
+
+"""
+begin
+ begin
+ function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "AddManySparseToTensorsMap") do
+ desc = tf.NodeDescription("AddManySparseToTensorsMap")
+ begin
+ begin
+ sparse_indices_ = convert(Tensor{Int64}, sparse_indices_)
+ begin
+ end
+ end
+ begin
+ sparse_values_ = convert(Tensor{Any}, sparse_values_)
+ begin
+ end
+ end
+ begin
+ sparse_shape_ = convert(Tensor{Int64}, sparse_shape_)
+ begin
+ end
+ end
+ begin
+ (sparse_values_,) = tf.tf_promote(sparse_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("AddManySparseToTensorsMap")
+ sparse_indices_ = convert(tf.EagerTensor, sparse_indices_)
+ sparse_values_ = convert(tf.EagerTensor, sparse_values_)
+ sparse_shape_ = convert(tf.EagerTensor, sparse_shape_)
+ begin
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_values_)
+ end
+ begin
+ tf.add_input(desc, sparse_shape_)
+ end
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(sparse_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name)
+ else
+ add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_reorder(input_indices, input_values, input_shape)
+
+Reorders the given `SparseTensor` into canonical, row-major ordering; returns
+the reordered `(output_indices, output_values)`. The shape is unchanged.
+
+"""
+begin
+ begin
+ function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseReorder") do
+ desc = tf.NodeDescription("SparseReorder")
+ begin
+ begin
+ input_indices_ = convert(Tensor{Int64}, input_indices_)
+ begin
+ end
+ end
+ begin
+ input_values_ = convert(Tensor{Any}, input_values_)
+ begin
+ end
+ end
+ begin
+ input_shape_ = convert(Tensor{Int64}, input_shape_)
+ begin
+ end
+ end
+ begin
+ (input_values_,) = tf.tf_promote(input_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing)
+ desc = tf.EagerOp("SparseReorder")
+ input_indices_ = convert(tf.EagerTensor, input_indices_)
+ input_values_ = convert(tf.EagerTensor, input_values_)
+ input_shape_ = convert(tf.EagerTensor, input_shape_)
+ begin
+ begin
+ tf.add_input(desc, input_indices_)
+ end
+ begin
+ tf.add_input(desc, input_values_)
+ end
+ begin
+ tf.add_input(desc, input_shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name)
+ else
+ sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name)
+ end
+ end
+ end
+end
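+
+# Illustrative sketch (not generator output), assuming eager execution has been
+# enabled and that these ops are accessed through the package's `Ops` module:
+#
+#   indices = constant(Int64[2 1; 1 1])      # one row of coordinates per entry
+#   values = constant([10.0, 20.0])
+#   dense_shape = constant(Int64[3, 3])
+#   idx, vals = Ops.sparse_reorder(indices, values, dense_shape)
+#   # idx/vals hold the same entries, reordered into canonical row-major order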
+
+
+"""
+ bitwise_xor(x, y)
+
+
+"""
+begin
+ begin
+ function bitwise_xor_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BitwiseXor") do
+ desc = tf.NodeDescription("BitwiseXor")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bitwise_xor_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("BitwiseXor")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_xor(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ bitwise_xor_eager(x_, y_; name=name)
+ else
+ bitwise_xor_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
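+
+# Illustrative sketch (not generator output): `bitwise_xor` promotes its inputs
+# to a common integer dtype and XORs them elementwise. In eager mode:
+#
+#   x = constant(Int32[1, 2, 3])
+#   y = constant(Int32[3, 2, 1])
+#   Ops.bitwise_xor(x, y)    # -> Int32[2, 0, 2]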
+
+
+"""
+ batch_matrix_set_diag(input, diagonal)
+
+
+"""
+begin
+ begin
+ function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchMatrixSetDiag") do
+ desc = tf.NodeDescription("BatchMatrixSetDiag")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ diagonal_ = convert(Tensor{Any}, diagonal_)
+ begin
+ end
+ end
+ begin
+ (input_, diagonal_) = tf.tf_promote(input_, diagonal_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing)
+ desc = tf.EagerOp("BatchMatrixSetDiag")
+ input_ = convert(tf.EagerTensor, input_)
+ diagonal_ = convert(tf.EagerTensor, diagonal_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, diagonal_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(diagonal_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_set_diag(input_, diagonal_; name=nothing)
+ if tf.in_eager_mode()
+ batch_matrix_set_diag_eager(input_, diagonal_; name=name)
+ else
+ batch_matrix_set_diag_graph(input_, diagonal_; name=name)
+ end
+ end
+ end
+end
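+
+# Illustrative sketch (not generator output): `batch_matrix_set_diag` returns a
+# copy of `input` with the main diagonal of each (batched) matrix replaced:
+#
+#   m = constant(zeros(Float32, 3, 3))
+#   d = constant(Float32[1, 2, 3])
+#   Ops.batch_matrix_set_diag(m, d)    # 3x3 matrix with 1, 2, 3 on the diagonal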
+
+
+"""
+ lookup_table_insert_v2(table_handle, keys, values)
+
+
+"""
+begin
+ begin
+ function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableInsertV2") do
+ desc = tf.NodeDescription("LookupTableInsertV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("LookupTableInsertV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name)
+ else
+ lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_dense_to_sparse_batch_dataset(input_dataset, batch_size, row_shape)
+
+
+"""
+begin
+ begin
+ function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do
+ desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ begin
+ row_shape_ = convert(Tensor{Int64}, row_shape_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, row_shape_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ row_shape_ = convert(tf.EagerTensor, row_shape_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, row_shape_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyRMSProp") do
+ desc = tf.NodeDescription("ResourceSparseApplyRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
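+                            # shift the caller's 1-based Julia indices to TensorFlow's 0-based convention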
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ random_crop(image, size; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "RandomCrop") do
+ desc = tf.NodeDescription("RandomCrop")
+ begin
+ begin
+ image_ = convert(Tensor{Any}, image_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int64}, size_)
+ begin
+ end
+ end
+ begin
+ (image_,) = tf.tf_promote(image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("RandomCrop")
+ image_ = convert(tf.EagerTensor, image_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2)
+ else
+ random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ lookup_table_import_v2(table_handle, keys, values)
+
+
+"""
+begin
+ begin
+ function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableImportV2") do
+ desc = tf.NodeDescription("LookupTableImportV2")
+ begin
+ begin
+ table_handle_ = convert(Tensor{Any}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("LookupTableImportV2")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tin"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tout"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name)
+ else
+ lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_scatter_nd_update(ref, indices, updates; use_locking=true)
+
+
+"""
+begin
+ begin
+ function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterNdUpdate") do
+ desc = tf.NodeDescription("ResourceScatterNdUpdate")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
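+                            # shift the caller's 1-based Julia indices to TensorFlow's 0-based convention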
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceScatterNdUpdate")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
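+
+# Illustrative sketch (not generator output): scatter `updates` into a resource
+# variable at `indices`. The wrapper accepts 1-based indices and shifts them to
+# TensorFlow's 0-based convention internally (see the `- 1` conversion above).
+# `var_handle` stands for an existing resource-variable handle:
+#
+#   indices = constant(reshape(Int64[1, 3], 2, 1))   # rows 1 and 3, 1-based
+#   updates = constant([10.0, 30.0])
+#   Ops.resource_scatter_nd_update(var_handle, indices, updates)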
+
+
+"""
+ static_regex_full_match(input)
+
+
+"""
+begin
+ begin
+ function static_regex_full_match_graph(input_; name=nothing, pattern=nothing)
+ local desc
+ tf.with_op_name(name, "StaticRegexFullMatch") do
+ desc = tf.NodeDescription("StaticRegexFullMatch")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if pattern !== nothing
+ desc["pattern"] = Base.String(pattern)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function static_regex_full_match_eager(input_; name=nothing, pattern=nothing)
+ desc = tf.EagerOp("StaticRegexFullMatch")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if pattern !== nothing
+ desc["pattern"] = Base.String(pattern)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_full_match(input_; name=nothing, pattern=nothing)
+ if tf.in_eager_mode()
+ static_regex_full_match_eager(input_; name=name, pattern=pattern)
+ else
+ static_regex_full_match_graph(input_; name=name, pattern=pattern)
+ end
+ end
+ end
+end
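+
+# Illustrative sketch (not generator output): `static_regex_full_match` tests
+# whether each input string matches the `pattern` attribute in full:
+#
+#   s = constant(["abc", "abcd"])
+#   Ops.static_regex_full_match(s, pattern="ab.")    # -> Bool[true, false]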
+
+
+"""
+ gcs_configure_credentials(json)
+
+Configures the credentials used by the GCS client of the local TF runtime.
+"""
+begin
+ begin
+ function gcs_configure_credentials_graph(json_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GcsConfigureCredentials") do
+ desc = tf.NodeDescription("GcsConfigureCredentials")
+ begin
+ begin
+ json_ = convert(Tensor{String}, json_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, json_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function gcs_configure_credentials_eager(json_; name=nothing)
+ desc = tf.EagerOp("GcsConfigureCredentials")
+ json_ = convert(tf.EagerTensor, json_)
+ begin
+ begin
+ tf.add_input(desc, json_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_credentials(json_; name=nothing)
+ if tf.in_eager_mode()
+ gcs_configure_credentials_eager(json_; name=name)
+ else
+ gcs_configure_credentials_graph(json_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_size_v3(handle, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArraySizeV3") do
+ desc = tf.NodeDescription("TensorArraySizeV3")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArraySizeV3")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v3(handle_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_size_v3_eager(handle_, flow_in_; name=name)
+ else
+ tensor_array_size_v3_graph(handle_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_segment_sqrt_n_with_num_segments(data, indices, segment_ids, num_segments)
+
+
+"""
+begin
+ begin
+ function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do
+ desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
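+                            # shift the caller's 1-based Julia indices to TensorFlow's 0-based convention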
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+ begin
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments")
+ data_ = convert(tf.EagerTensor, data_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(indices_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name)
+ else
+ sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, data_format="NHWC", dilations=[1, 1, 1, 1])
+
+
+"""
+begin
+ begin
+ function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv2DBackpropFilter") do
+ desc = tf.NodeDescription("Conv2DBackpropFilter")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_sizes_ = convert(Tensor{Int32}, filter_sizes_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_sizes_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if use_cudnn_on_gpu !== nothing
+ desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv2DBackpropFilter")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_sizes_ = convert(tf.EagerTensor, filter_sizes_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_sizes_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if use_cudnn_on_gpu !== nothing
+ desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
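+
+# Illustrative sketch (not generator output): `conv2d_backprop_filter` computes
+# the gradient of `conv2d` with respect to the filter; `filter_sizes` gives the
+# shape of the filter whose gradient is requested. Assuming NHWC inputs `x` and
+# incoming gradients `dy`:
+#
+#   Ops.conv2d_backprop_filter(x, constant(Int32[3, 3, 1, 8]), dy,
+#                              strides=[1, 1, 1, 1], padding="VALID")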
+
+
+"""
+ experimental_group_by_reducer_dataset(input_dataset, key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments)
+
+
+"""
+begin
+ begin
+ function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do
+ desc = tf.NodeDescription("ExperimentalGroupByReducerDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_]
+ begin
+ end
+ end
+ begin
+ init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_]
+ begin
+ end
+ end
+ begin
+ reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_]
+ begin
+ end
+ end
+ begin
+ finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = finalize_func_other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, key_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, init_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, reduce_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, finalize_func_other_arguments_)
+ end
+ end
+ begin
+ begin
+ if key_func !== nothing
+ desc["key_func"] = Base.identity(key_func)
+ end
+ end
+ begin
+ if init_func !== nothing
+ desc["init_func"] = Base.identity(init_func)
+ end
+ end
+ begin
+ if reduce_func !== nothing
+ desc["reduce_func"] = Base.identity(reduce_func)
+ end
+ end
+ begin
+ if finalize_func !== nothing
+ desc["finalize_func"] = Base.identity(finalize_func)
+ end
+ end
+ begin
+ if Tkey_func_other_arguments !== nothing
+ desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments)
+ end
+ end
+ begin
+ if Tinit_func_other_arguments !== nothing
+ desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments)
+ end
+ end
+ begin
+ if Treduce_func_other_arguments !== nothing
+ desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments)
+ end
+ end
+ begin
+ if Tfinalize_func_other_arguments !== nothing
+ desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalGroupByReducerDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_)
+ init_func_other_arguments_ = convert(tf.EagerTensor, init_func_other_arguments_)
+ reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_)
+ finalize_func_other_arguments_ = convert(tf.EagerTensor, finalize_func_other_arguments_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, key_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, init_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, reduce_func_other_arguments_)
+ end
+ begin
+ tf.add_input(desc, finalize_func_other_arguments_)
+ end
+ end
+ begin
+ begin
+ if key_func !== nothing
+ desc["key_func"] = Base.identity(key_func)
+ end
+ end
+ begin
+ if init_func !== nothing
+ desc["init_func"] = Base.identity(init_func)
+ end
+ end
+ begin
+ if reduce_func !== nothing
+ desc["reduce_func"] = Base.identity(reduce_func)
+ end
+ end
+ begin
+ if finalize_func !== nothing
+ desc["finalize_func"] = Base.identity(finalize_func)
+ end
+ end
+ begin
+ if Tkey_func_other_arguments !== nothing
+ desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments)
+ end
+ end
+ begin
+ if Tinit_func_other_arguments !== nothing
+ desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments)
+ end
+ end
+ begin
+ if Treduce_func_other_arguments !== nothing
+ desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments)
+ end
+ end
+ begin
+ if Tfinalize_func_other_arguments !== nothing
+ desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    max_pool_grad(orig_input, orig_output, grad; data_format="NHWC")
+
+
+"""
+begin
+ begin
+ function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolGrad") do
+ desc = tf.NodeDescription("MaxPoolGrad")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Float32}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Float32}, orig_output_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Float32}, grad_)
+ begin
+ end
+ end
+ begin
+ (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPoolGrad")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ _initialize_host_for_distributed_tpu(input)
+
+An op that connects each chip on the host to a centralized UberDriver to allow
+them to be managed as a group.
+"""
+begin
+ begin
+ function _initialize_host_for_distributed_tpu_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_InitializeHostForDistributedTPU") do
+ desc = tf.NodeDescription("_InitializeHostForDistributedTPU")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _initialize_host_for_distributed_tpu_eager(input_; name=nothing)
+ desc = tf.EagerOp("_InitializeHostForDistributedTPU")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _initialize_host_for_distributed_tpu(input_; name=nothing)
+ if tf.in_eager_mode()
+ _initialize_host_for_distributed_tpu_eager(input_; name=name)
+ else
+ _initialize_host_for_distributed_tpu_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    stage_peek(index; capacity=0, memory_limit=0, container="", shared_name="")
+
+
+"""
+begin
+ begin
+ function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "StagePeek") do
+ desc = tf.NodeDescription("StagePeek")
+ begin
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, index_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("StagePeek")
+ index_ = convert(tf.EagerTensor, index_)
+ begin
+ begin
+ tf.add_input(desc, index_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stage_peek, [index_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ pad_v2(input, paddings, constant_values)
+
+
+"""
+begin
+ begin
+ function pad_v2_graph(input_, paddings_, constant_values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "PadV2") do
+ desc = tf.NodeDescription("PadV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ paddings_ = convert(Tensor{Int32}, paddings_)
+ begin
+ end
+ end
+ begin
+ constant_values_ = convert(Tensor{Any}, constant_values_)
+ begin
+ end
+ end
+ begin
+ (input_, constant_values_) = tf.tf_promote(input_, constant_values_)
+ end
+ begin
+ (paddings_,) = tf.tf_promote(paddings_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ begin
+ tf.add_input(desc, constant_values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function pad_v2_eager(input_, paddings_, constant_values_; name=nothing)
+ desc = tf.EagerOp("PadV2")
+ input_ = convert(tf.EagerTensor, input_)
+ paddings_ = convert(tf.EagerTensor, paddings_)
+ constant_values_ = convert(tf.EagerTensor, constant_values_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, paddings_)
+ end
+ begin
+ tf.add_input(desc, constant_values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tpaddings"] = tf.data_type(paddings_)
+ end
+ begin
+ desc["T"] = tf.data_type(constant_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad_v2(input_, paddings_, constant_values_; name=nothing)
+ if tf.in_eager_mode()
+ pad_v2_eager(input_, paddings_, constant_values_; name=name)
+ else
+ pad_v2_graph(input_, paddings_, constant_values_; name=name)
+ end
+ end
+ end
+end
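+
+# Illustrative sketch (not generator output): `pad_v2` pads with an arbitrary
+# scalar instead of zeros; `paddings` holds one (before, after) pair per axis:
+#
+#   x = constant([1.0, 2.0])
+#   paddings = constant(Int32[1 1])            # one element on each side
+#   Ops.pad_v2(x, paddings, constant(9.0))     # -> [9.0, 1.0, 2.0, 9.0]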
+
+
+"""
+ optional_get_value(optional)
+
+
+"""
+begin
+ begin
+ function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "OptionalGetValue") do
+ desc = tf.NodeDescription("OptionalGetValue")
+ begin
+ begin
+ optional_ = convert(Tensor{Any}, optional_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, optional_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("OptionalGetValue")
+ optional_ = convert(tf.EagerTensor, optional_)
+ begin
+ begin
+ tf.add_input(desc, optional_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+    print_v2(input; output_stream="stderr")
+
+
+"""
+begin
+ begin
+ function print_v2_graph(input_; name=nothing, output_stream=nothing)
+ local desc
+ tf.with_op_name(name, "PrintV2") do
+ desc = tf.NodeDescription("PrintV2")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if output_stream !== nothing
+ desc["output_stream"] = Base.String(output_stream)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function print_v2_eager(input_; name=nothing, output_stream=nothing)
+ desc = tf.EagerOp("PrintV2")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if output_stream !== nothing
+ desc["output_stream"] = Base.String(output_stream)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print_v2(input_; name=nothing, output_stream=nothing)
+ if tf.in_eager_mode()
+ print_v2_eager(input_; name=name, output_stream=output_stream)
+ else
+ print_v2_graph(input_; name=name, output_stream=output_stream)
+ end
+ end
+ end
+end
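+
+# Illustrative sketch (not generator output): `print_v2` writes a string tensor
+# to the runtime's output stream, selected by the `output_stream` attribute:
+#
+#   Ops.print_v2(constant("hello from the TF runtime"))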
+
+
+"""
+ _parallel_concat_start()
+
+Creates an empty Tensor with shape `shape` and type `dtype`.
+"""
+begin
+ begin
+ function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "_ParallelConcatStart") do
+ desc = tf.NodeDescription("_ParallelConcatStart")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing)
+ desc = tf.EagerOp("_ParallelConcatStart")
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype)
+ else
+ _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+    load_tpu_embedding_ftrl_parameters(parameters, accumulators, linears; table_id=-1, table_name="")
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ accumulators_ = convert(Tensor{Float32}, accumulators_)
+ begin
+ end
+ end
+ begin
+ linears_ = convert(Tensor{Float32}, linears_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, linears_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ accumulators_ = convert(tf.EagerTensor, accumulators_)
+ linears_ = convert(tf.EagerTensor, linears_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, accumulators_)
+ end
+ begin
+ tf.add_input(desc, linears_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_slice(indices, values, shape, start, size)
+
+
+"""
+begin
+ begin
+ function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSlice") do
+ desc = tf.NodeDescription("SparseSlice")
+ begin
+ begin
+ indices_ = convert(Tensor{Int64}, indices_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ shape_ = convert(Tensor{Int64}, shape_)
+ begin
+ end
+ end
+ begin
+ start_ = convert(Tensor{Int64}, start_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int64}, size_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing)
+ desc = tf.EagerOp("SparseSlice")
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ start_ = convert(tf.EagerTensor, start_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_slice, [indices_, values_, shape_, start_, size_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name)
+ else
+ sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_make_quantile_summaries(float_values, example_weights, epsilon)
+
+ Makes per-feature quantile summaries for a batch of float feature values.
+"""
+begin
+ begin
+ function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do
+ desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries")
+ begin
+ begin
+ float_values_ = [convert(Tensor{Float32}, x) for x = float_values_]
+ begin
+ end
+ end
+ begin
+ example_weights_ = convert(Tensor{Float32}, example_weights_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Float32}, epsilon_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, float_values_)
+ end
+ begin
+ tf.add_input(desc, example_weights_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_features
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries")
+ float_values_ = convert(tf.EagerTensor, float_values_)
+ example_weights_ = convert(tf.EagerTensor, example_weights_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ begin
+ begin
+ tf.add_input(desc, float_values_)
+ end
+ begin
+ tf.add_input(desc, example_weights_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features)
+ else
+ boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features)
+ end
+ end
+ end
+end
+
+
+"""
+ matrix_solve(matrix, rhs; adjoint=false)
+
+
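+ Solves the systems of linear equations `matrix * output = rhs`. A minimal
+ eager-mode sketch (assumes `enable_eager_execution()` has been called):
+
+     using TensorFlow
+     enable_eager_execution()
+     A = constant([2.0 0.0; 0.0 4.0])
+     b = constant(reshape([2.0, 8.0], 2, 1))
+     x = matrix_solve(A, b)    # ≈ [1.0; 2.0]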
+"""
+begin
+ begin
+ function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixSolve") do
+ desc = tf.NodeDescription("MatrixSolve")
+ begin
+ begin
+ matrix_ = convert(Tensor{Any}, matrix_)
+ begin
+ end
+ end
+ begin
+ rhs_ = convert(Tensor{Any}, rhs_)
+ begin
+ end
+ end
+ begin
+ (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing)
+ desc = tf.EagerOp("MatrixSolve")
+ matrix_ = convert(tf.EagerTensor, matrix_)
+ rhs_ = convert(tf.EagerTensor, rhs_)
+ begin
+ begin
+ tf.add_input(desc, matrix_)
+ end
+ begin
+ tf.add_input(desc, rhs_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(matrix_)
+ end
+ begin
+ desc["T"] = tf.data_type(rhs_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing)
+ if tf.in_eager_mode()
+ matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint)
+ else
+ matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint)
+ end
+ end
+ end
+end
+
+
+"""
+ _configure_distributed_tpu(inputs)
+
+An op that sets up the centralized structures for a distributed TPU
+"""
+begin
+ begin
+ function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing)
+ local desc
+ tf.with_op_name(name, "_ConfigureDistributedTPU") do
+ desc = tf.NodeDescription("_ConfigureDistributedTPU")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Int32}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing)
+ desc = tf.EagerOp("_ConfigureDistributedTPU")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _configure_distributed_tpu(inputs_; name=nothing, N=nothing)
+ if tf.in_eager_mode()
+ _configure_distributed_tpu_eager(inputs_; name=name, N=N)
+ else
+ _configure_distributed_tpu_graph(inputs_; name=name, N=N)
+ end
+ end
+ end
+end
+
+
+"""
+ adjust_contrastv2(images, contrast_factor)
+
+
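+ Adjusts the contrast of one or more images by scaling each channel around its
+ mean. A minimal eager-mode sketch (shapes are illustrative; assumes
+ `enable_eager_execution()` has been called):
+
+     using TensorFlow
+     enable_eager_execution()
+     img = constant(rand(Float32, 1, 4, 4, 3))   # [batch, height, width, channels]
+     out = adjust_contrastv2(img, constant(2.0f0))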
+"""
+begin
+ begin
+ function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AdjustContrastv2") do
+ desc = tf.NodeDescription("AdjustContrastv2")
+ begin
+ begin
+ images_ = convert(Tensor{Float32}, images_)
+ begin
+ end
+ end
+ begin
+ contrast_factor_ = convert(Tensor{Float32}, contrast_factor_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, contrast_factor_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function adjust_contrastv2_eager(images_, contrast_factor_; name=nothing)
+ desc = tf.EagerOp("AdjustContrastv2")
+ images_ = convert(tf.EagerTensor, images_)
+ contrast_factor_ = convert(tf.EagerTensor, contrast_factor_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, contrast_factor_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrastv2(images_, contrast_factor_; name=nothing)
+ if tf.in_eager_mode()
+ adjust_contrastv2_eager(images_, contrast_factor_; name=name)
+ else
+ adjust_contrastv2_graph(images_, contrast_factor_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ _mkl_maximum(x, y, mkl_x, mkl_y)
+
+Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+"""
+begin
+ begin
+ function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_MklMaximum") do
+ desc = tf.NodeDescription("_MklMaximum")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ mkl_x_ = convert(Tensor{UInt8}, mkl_x_)
+ begin
+ end
+ end
+ begin
+ mkl_y_ = convert(Tensor{UInt8}, mkl_y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ desc = tf.EagerOp("_MklMaximum")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ mkl_x_ = convert(tf.EagerTensor, mkl_x_)
+ mkl_y_ = convert(tf.EagerTensor, mkl_y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ if tf.in_eager_mode()
+ _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name)
+ else
+ _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0.0, seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ local desc
+ tf.with_op_name(name, "CudnnRNNParamsSize") do
+ desc = tf.NodeDescription("CudnnRNNParamsSize")
+ begin
+ begin
+ num_layers_ = convert(Tensor{Int32}, num_layers_)
+ begin
+ end
+ end
+ begin
+ num_units_ = convert(Tensor{Int32}, num_units_)
+ begin
+ end
+ end
+ begin
+ input_size_ = convert(Tensor{Int32}, input_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, num_layers_)
+ end
+ begin
+ tf.add_input(desc, num_units_)
+ end
+ begin
+ tf.add_input(desc, input_size_)
+ end
+ end
+ begin
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ desc = tf.EagerOp("CudnnRNNParamsSize")
+ num_layers_ = convert(tf.EagerTensor, num_layers_)
+ num_units_ = convert(tf.EagerTensor, num_units_)
+ input_size_ = convert(tf.EagerTensor, input_size_)
+ begin
+ begin
+ tf.add_input(desc, num_layers_)
+ end
+ begin
+ tf.add_input(desc, num_units_)
+ end
+ begin
+ tf.add_input(desc, input_size_)
+ end
+ end
+ begin
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ begin
+ if rnn_mode !== nothing
+ desc["rnn_mode"] = Base.String(rnn_mode)
+ end
+ end
+ begin
+ if input_mode !== nothing
+ desc["input_mode"] = Base.String(input_mode)
+ end
+ end
+ begin
+ if direction !== nothing
+ desc["direction"] = Base.String(direction)
+ end
+ end
+ begin
+ if dropout !== nothing
+ desc["dropout"] = Base.identity(dropout)
+ end
+ end
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+ if tf.in_eager_mode()
+ cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ else
+ cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle, summaries)
+
+ Adds each input quantile summary to its corresponding quantile stream resource.
+"""
+begin
+ begin
+ function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do
+ desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries")
+ begin
+ begin
+ quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_)
+ begin
+ end
+ end
+ begin
+ summaries_ = [convert(Tensor{Float32}, x) for x = summaries_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, summaries_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries")
+ quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+ summaries_ = convert(tf.EagerTensor, summaries_)
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, summaries_)
+ end
+ end
+ begin
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features)
+ else
+ boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_ifft3d(input)
+
+ Computes the inverse 3-D fast Fourier transform over the innermost 3 dimensions of `input` (deprecated alias of `IFFT3D`).
+"""
+begin
+ begin
+ function batch_ifft3d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchIFFT3D") do
+ desc = tf.NodeDescription("BatchIFFT3D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_ifft3d_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchIFFT3D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_ifft3d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft3d(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_ifft3d_eager(input_; name=name)
+ else
+ batch_ifft3d_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ sigmoid(x)
+
+
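+ Computes the sigmoid of `x` element-wise, `1 / (1 + exp(-x))`. A minimal
+ eager-mode sketch (assumes `enable_eager_execution()` has been called):
+
+     using TensorFlow
+     enable_eager_execution()
+     x = constant([-1.0, 0.0, 1.0])
+     y = sigmoid(x)    # ≈ [0.269, 0.5, 0.731]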
+"""
+begin
+ begin
+ function sigmoid_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Sigmoid") do
+ desc = tf.NodeDescription("Sigmoid")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sigmoid_eager(x_; name=nothing)
+ desc = tf.EagerOp("Sigmoid")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sigmoid, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid(x_; name=nothing)
+ if tf.in_eager_mode()
+ sigmoid_eager(x_; name=name)
+ else
+ sigmoid_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ segment_mean(data, segment_ids)
+
+
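+ Computes the mean along segments of a tensor. In graph mode this wrapper
+ shifts `segment_ids` down by one, so ids are 1-based on the Julia side. A
+ minimal graph-mode sketch (values are illustrative):
+
+     using TensorFlow
+     sess = Session(Graph())
+     data = constant([1.0, 2.0, 3.0, 4.0])
+     ids  = constant([1, 1, 2, 2])          # 1-based segment ids
+     run(sess, segment_mean(data, ids))     # ≈ [1.5, 3.5]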
+"""
+begin
+ begin
+ function segment_mean_graph(data_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SegmentMean") do
+ desc = tf.NodeDescription("SegmentMean")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function segment_mean_eager(data_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SegmentMean")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_mean(data_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ segment_mean_eager(data_, segment_ids_; name=name)
+ else
+ segment_mean_graph(data_, segment_ids_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ is_boosted_trees_ensemble_initialized(tree_ensemble_handle)
+
+ Checks whether a tree ensemble has been initialized.
+"""
+begin
+ begin
+ function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do
+ desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing)
+ desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing)
+ if tf.in_eager_mode()
+ is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name)
+ else
+ is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_size_v2(handle, flow_in)
+
+ Gets the current size of the TensorArray.
+"""
+begin
+ begin
+ function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArraySizeV2") do
+ desc = tf.NodeDescription("TensorArraySizeV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArraySizeV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v2(handle_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_size_v2_eager(handle_, flow_in_; name=name)
+ else
+ tensor_array_size_v2_graph(handle_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ _mkl_sub(x, y, mkl_x, mkl_y)
+
+Returns x - y element-wise.
+"""
+begin
+ begin
+ function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_MklSub") do
+ desc = tf.NodeDescription("_MklSub")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ mkl_x_ = convert(Tensor{UInt8}, mkl_x_)
+ begin
+ end
+ end
+ begin
+ mkl_y_ = convert(Tensor{UInt8}, mkl_y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ desc = tf.EagerOp("_MklSub")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ mkl_x_ = convert(tf.EagerTensor, mkl_x_)
+ mkl_y_ = convert(tf.EagerTensor, mkl_y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ if tf.in_eager_mode()
+ _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name)
+ else
+ _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ send_tpu_embedding_gradients(inputs, learning_rates; NN=0)
+
+An op that performs gradient updates of embedding tables.
+"""
+begin
+ begin
+ function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing)
+ local desc
+ tf.with_op_name(name, "SendTPUEmbeddingGradients") do
+ desc = tf.NodeDescription("SendTPUEmbeddingGradients")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Float32}, x) for x = inputs_]
+ begin
+ end
+ end
+ begin
+ learning_rates_ = [convert(Tensor{Float32}, x) for x = learning_rates_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, learning_rates_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if NN !== nothing
+ desc["NN"] = Base.Int(NN)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing)
+ desc = tf.EagerOp("SendTPUEmbeddingGradients")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ learning_rates_ = convert(tf.EagerTensor, learning_rates_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, learning_rates_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if NN !== nothing
+ desc["NN"] = Base.Int(NN)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing)
+ if tf.in_eager_mode()
+ send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config)
+ else
+ send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config)
+ end
+ end
+ end
+end
+
+
+"""
+ max_pool3d(input; data_format="NDHWC")
+
+
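+ Performs 3-D max pooling on the input. A minimal eager-mode sketch (window
+ and strides are illustrative; assumes `enable_eager_execution()` has been
+ called and the default `data_format="NDHWC"`):
+
+     using TensorFlow
+     enable_eager_execution()
+     x = constant(rand(Float32, 1, 4, 4, 4, 1))  # [batch, depth, height, width, channels]
+     y = max_pool3d(x; ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding="VALID")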
+"""
+begin
+ begin
+ function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPool3D") do
+ desc = tf.NodeDescription("MaxPool3D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPool3D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ prod(input, reduction_indices; keep_dims=false)
+
+
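+ Computes the product of elements across the given dimensions of a tensor. In
+ graph mode this wrapper shifts `reduction_indices` down by one, so dimensions
+ are 1-based on the Julia side. A minimal graph-mode sketch:
+
+     using TensorFlow
+     sess = Session(Graph())
+     x = constant([1.0, 2.0, 3.0])
+     run(sess, prod(x, constant(1)))    # ≈ 6.0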
+"""
+begin
+ begin
+ function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ local desc
+ tf.with_op_name(name, "Prod") do
+ desc = tf.NodeDescription("Prod")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
+ begin
+ reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (reduction_indices_,) = tf.tf_promote(reduction_indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ desc = tf.EagerOp("Prod")
+ input_ = convert(tf.EagerTensor, input_)
+ reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, reduction_indices_)
+ end
+ end
+ begin
+ begin
+ if keep_dims !== nothing
+ desc["keep_dims"] = Base.Bool(keep_dims)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(reduction_indices_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+ if tf.in_eager_mode()
+ prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ else
+ prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_identity_indexed_dataset(size)
+
+
+"""
+begin
+ begin
+ function experimental_identity_indexed_dataset_graph(size_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do
+ desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset")
+ begin
+ begin
+ size_ = convert(Tensor{Any}, size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_identity_indexed_dataset_eager(size_; name=nothing)
+ desc = tf.EagerOp("ExperimentalIdentityIndexedDataset")
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_identity_indexed_dataset(size_; name=nothing)
+ if tf.in_eager_mode()
+ experimental_identity_indexed_dataset_eager(size_; name=name)
+ else
+ experimental_identity_indexed_dataset_graph(size_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_push_back(input_handle, tensor)
+
+ Returns a list with `tensor` appended to `input_handle` as its last element.
+"""
+begin
+ begin
+ function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListPushBack") do
+ desc = tf.NodeDescription("TensorListPushBack")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListPushBack")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ begin
+ desc["element_dtype"] = tf.data_type(tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_function(in_tensors, captured_tensors; max_enqueued_batches=10, allowed_batch_sizes=Int64[], container="", shared_name="", batching_queue="")
+
+ Batches the input tensors and runs the computation defined by the function `f` on the batch.
+"""
+begin
+ begin
+ function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing)
+ local desc
+ tf.with_op_name(name, "BatchFunction") do
+ desc = tf.NodeDescription("BatchFunction")
+ begin
+ begin
+ in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_]
+ begin
+ end
+ end
+ begin
+ captured_tensors_ = [convert(Tensor{Any}, x) for x = captured_tensors_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, in_tensors_)
+ end
+ begin
+ tf.add_input(desc, captured_tensors_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if num_batch_threads !== nothing
+ desc["num_batch_threads"] = Base.Int(num_batch_threads)
+ end
+ end
+ begin
+ if max_batch_size !== nothing
+ desc["max_batch_size"] = Base.Int(max_batch_size)
+ end
+ end
+ begin
+ if batch_timeout_micros !== nothing
+ desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros)
+ end
+ end
+ begin
+ if max_enqueued_batches !== nothing
+ desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches)
+ end
+ end
+ begin
+ if allowed_batch_sizes !== nothing
+ desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if batching_queue !== nothing
+ desc["batching_queue"] = Base.String(batching_queue)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tcaptured !== nothing
+ desc["Tcaptured"] = map(Base.identity, Tcaptured)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing)
+ desc = tf.EagerOp("BatchFunction")
+ in_tensors_ = convert(tf.EagerTensor, in_tensors_)
+ captured_tensors_ = convert(tf.EagerTensor, captured_tensors_)
+ begin
+ begin
+ tf.add_input(desc, in_tensors_)
+ end
+ begin
+ tf.add_input(desc, captured_tensors_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if num_batch_threads !== nothing
+ desc["num_batch_threads"] = Base.Int(num_batch_threads)
+ end
+ end
+ begin
+ if max_batch_size !== nothing
+ desc["max_batch_size"] = Base.Int(max_batch_size)
+ end
+ end
+ begin
+ if batch_timeout_micros !== nothing
+ desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros)
+ end
+ end
+ begin
+ if max_enqueued_batches !== nothing
+ desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches)
+ end
+ end
+ begin
+ if allowed_batch_sizes !== nothing
+ desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if batching_queue !== nothing
+ desc["batching_queue"] = Base.String(batching_queue)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tcaptured !== nothing
+ desc["Tcaptured"] = map(Base.identity, Tcaptured)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing)
+ if tf.in_eager_mode()
+ batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout)
+ else
+ batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_fill_empty_rows(indices, values, dense_shape, default_value)
+
+
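+ Fills empty rows of a 2-D `SparseTensor` with a default value, also returning
+ an indicator of which rows were empty and a reverse index map. A minimal
+ eager-mode sketch (0-based indices are passed through unchanged; assumes
+ `enable_eager_execution()` has been called):
+
+     using TensorFlow
+     enable_eager_execution()
+     indices = constant(Int64[0 0; 2 1])    # the middle row (0-based index 1) has no entries
+     values  = constant([1.0, 2.0])
+     dense_shape = constant(Int64[3, 2])
+     out = sparse_fill_empty_rows(indices, values, dense_shape, constant(0.0))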
+"""
+begin
+ begin
+ function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseFillEmptyRows") do
+ desc = tf.NodeDescription("SparseFillEmptyRows")
+ begin
+ begin
+ indices_ = convert(Tensor{Int64}, indices_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ dense_shape_ = convert(Tensor{Int64}, dense_shape_)
+ begin
+ end
+ end
+ begin
+ default_value_ = convert(Tensor{Any}, default_value_)
+ begin
+ end
+ end
+ begin
+ (values_, default_value_) = tf.tf_promote(values_, default_value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, dense_shape_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:4
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing)
+ desc = tf.EagerOp("SparseFillEmptyRows")
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ dense_shape_ = convert(tf.EagerTensor, dense_shape_)
+ default_value_ = convert(tf.EagerTensor, default_value_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, dense_shape_)
+ end
+ begin
+ tf.add_input(desc, default_value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ begin
+ desc["T"] = tf.data_type(default_value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name)
+ else
+ sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ self_adjoint_eig_v2(input; compute_v=true)
+
+
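+ Computes the eigenvalues and, when `compute_v=true`, the eigenvectors of one
+ or more self-adjoint matrices. A minimal eager-mode sketch (assumes
+ `enable_eager_execution()` has been called):
+
+     using TensorFlow
+     enable_eager_execution()
+     A = constant([2.0 1.0; 1.0 2.0])
+     e, v = self_adjoint_eig_v2(A; compute_v=true)   # eigenvalues ≈ [1.0, 3.0]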
+"""
+begin
+ begin
+ function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing)
+ local desc
+ tf.with_op_name(name, "SelfAdjointEigV2") do
+ desc = tf.NodeDescription("SelfAdjointEigV2")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_v !== nothing
+ desc["compute_v"] = Base.Bool(compute_v)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing)
+ desc = tf.EagerOp("SelfAdjointEigV2")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if compute_v !== nothing
+ desc["compute_v"] = Base.Bool(compute_v)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing)
+ if tf.in_eager_mode()
+ self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v)
+ else
+ self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v)
+ end
+ end
+ end
+end
+
+
+"""
+ retrieve_tpu_embedding_ftrl_parameters(; table_id=-1, table_name="")
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_ftrl_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step; use_locking=false)
+
+ Updates entries in `var` and the gradient accumulators according to the proximal AdagradDA scheme.
+"""
+begin
+ begin
+ function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do
+ desc = tf.NodeDescription("ResourceSparseApplyAdagradDA")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_)
+ begin
+ end
+ end
+ begin
+ gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ global_step_ = convert(Tensor{Int64}, global_step_)
+ begin
+ end
+ end
+ begin
+ (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyAdagradDA")
+ var_ = convert(tf.EagerTensor, var_)
+ gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_)
+ gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ global_step_ = convert(tf.EagerTensor, global_step_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ else
+ resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ temporary_variable(; var_name="")
+
+ Returns a tensor that may be mutated, but only persists within a single step.
+"""
+begin
+ begin
+ function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing)
+ local desc
+ tf.with_op_name(name, "TemporaryVariable") do
+ desc = tf.NodeDescription("TemporaryVariable")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if var_name !== nothing
+ desc["var_name"] = Base.String(var_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function temporary_variable_eager(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing)
+ desc = tf.EagerOp("TemporaryVariable")
+ begin
+ end
+ begin
+ begin
+ if shape !== nothing
+ desc["shape"] = Base.identity(shape)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if var_name !== nothing
+ desc["var_name"] = Base.String(var_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing)
+ if tf.in_eager_mode()
+ temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name)
+ else
+ temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad; use_locking=false)
+
+ Updates `var` according to the AddSign optimizer update rule.
+"""
+begin
+ begin
+ function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAddSign") do
+ desc = tf.NodeDescription("ResourceApplyAddSign")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ sign_decay_ = convert(Tensor{Any}, sign_decay_)
+ begin
+ end
+ end
+ begin
+ beta_ = convert(Tensor{Any}, beta_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, alpha_, sign_decay_, beta_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyAddSign")
+ var_ = convert(tf.EagerTensor, var_)
+ m_ = convert(tf.EagerTensor, m_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ sign_decay_ = convert(tf.EagerTensor, sign_decay_)
+ beta_ = convert(tf.EagerTensor, beta_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, sign_decay_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(sign_decay_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ roll(input, shift, axis)
+
+
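+ Rolls the elements of a tensor along an axis, wrapping around at the end. The
+ `axis` is passed through unchanged, so it is 0-based as in TensorFlow. A
+ minimal eager-mode sketch (assumes `enable_eager_execution()` has been called):
+
+     using TensorFlow
+     enable_eager_execution()
+     x = constant([1, 2, 3, 4, 5])
+     roll(x, constant(2), constant(0))   # → [4, 5, 1, 2, 3]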
+"""
+begin
+ begin
+ function roll_graph(input_, shift_, axis_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Roll") do
+ desc = tf.NodeDescription("Roll")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ shift_ = convert(Tensor{Any}, shift_)
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Any}, axis_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (shift_,) = tf.tf_promote(shift_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, shift_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function roll_eager(input_, shift_, axis_; name=nothing)
+ desc = tf.EagerOp("Roll")
+ input_ = convert(tf.EagerTensor, input_)
+ shift_ = convert(tf.EagerTensor, shift_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, shift_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tshift"] = tf.data_type(shift_)
+ end
+ begin
+ desc["Taxis"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function roll(input_, shift_, axis_; name=nothing)
+ if tf.in_eager_mode()
+ roll_eager(input_, shift_, axis_; name=name)
+ else
+ roll_graph(input_, shift_, axis_; name=name)
+ end
+ end
+ end
+end
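+# Editor's note: a usage sketch for `roll` (not generator output). The axis is
+# forwarded to the TensorFlow kernel unchanged, so it is 0-based. Assuming
+# eager mode is enabled and `constant` is in scope:
+#
+#     x = constant([1, 2, 3, 4, 5])
+#     roll(x, 2, 0)    # cyclic shift by 2 -> [4, 5, 1, 2, 3]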
+
+
+"""
+ xdivy(x, y)
+
+Returns 0 if x == 0, and x / y otherwise, element-wise.
+"""
+begin
+ begin
+ function xdivy_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Xdivy") do
+ desc = tf.NodeDescription("Xdivy")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function xdivy_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Xdivy")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(xdivy, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xdivy(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ xdivy_eager(x_, y_; name=name)
+ else
+ xdivy_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
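+# Editor's note: a usage sketch for `xdivy` (not generator output). It computes
+# x / y element-wise but returns 0 wherever x == 0, avoiding NaN from 0 / 0:
+#
+#     xdivy(constant([0.0, 4.0]), constant([0.0, 2.0]))    # -> [0.0, 2.0]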
+
+
+"""
+ max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=)
+
+Computes second-order gradients of the maxpooling function.
+"""
+begin
+ begin
+ function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPool3DGradGrad") do
+ desc = tf.NodeDescription("MaxPool3DGradGrad")
+ begin
+ begin
+ orig_input_ = convert(Tensor{Any}, orig_input_)
+ begin
+ end
+ end
+ begin
+ orig_output_ = convert(Tensor{Any}, orig_output_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ desc = tf.EagerOp("MaxPool3DGradGrad")
+ orig_input_ = convert(tf.EagerTensor, orig_input_)
+ orig_output_ = convert(tf.EagerTensor, orig_output_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, orig_input_)
+ end
+ begin
+ tf.add_input(desc, orig_output_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(orig_input_)
+ end
+ begin
+ desc["T"] = tf.data_type(orig_output_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+ if tf.in_eager_mode()
+ max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ else
+ max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias)
+
+Adds Tensor `bias` to Tensor `input` for quantized types.
+"""
+begin
+ begin
+ function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedBiasAdd") do
+ desc = tf.NodeDescription("QuantizedBiasAdd")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ bias_ = convert(Tensor{Any}, bias_)
+ begin
+ end
+ end
+ begin
+ min_input_ = convert(Tensor{Float32}, min_input_)
+ begin
+ end
+ end
+ begin
+ max_input_ = convert(Tensor{Float32}, max_input_)
+ begin
+ end
+ end
+ begin
+ min_bias_ = convert(Tensor{Float32}, min_bias_)
+ begin
+ end
+ end
+ begin
+ max_bias_ = convert(Tensor{Float32}, max_bias_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (bias_,) = tf.tf_promote(bias_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, bias_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ begin
+ tf.add_input(desc, min_bias_)
+ end
+ begin
+ tf.add_input(desc, max_bias_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("QuantizedBiasAdd")
+ input_ = convert(tf.EagerTensor, input_)
+ bias_ = convert(tf.EagerTensor, bias_)
+ min_input_ = convert(tf.EagerTensor, min_input_)
+ max_input_ = convert(tf.EagerTensor, max_input_)
+ min_bias_ = convert(tf.EagerTensor, min_bias_)
+ max_bias_ = convert(tf.EagerTensor, max_bias_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, bias_)
+ end
+ begin
+ tf.add_input(desc, min_input_)
+ end
+ begin
+ tf.add_input(desc, max_input_)
+ end
+ begin
+ tf.add_input(desc, min_bias_)
+ end
+ begin
+ tf.add_input(desc, max_bias_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["T1"] = tf.data_type(input_)
+ end
+ begin
+ desc["T2"] = tf.data_type(bias_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type)
+ else
+ quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ crop_and_resize(image, boxes, box_ind, crop_size; method=, extrapolation_value=?)
+
+Extracts crops from the input image tensor and resizes them.
+"""
+begin
+ begin
+ function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing)
+ local desc
+ tf.with_op_name(name, "CropAndResize") do
+ desc = tf.NodeDescription("CropAndResize")
+ begin
+ begin
+ image_ = convert(Tensor{Any}, image_)
+ begin
+ end
+ end
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ box_ind_ = convert(Tensor{Int32}, box_ind_)
+ begin
+ end
+ end
+ begin
+ crop_size_ = convert(Tensor{Int32}, crop_size_)
+ begin
+ end
+ end
+ begin
+ (image_,) = tf.tf_promote(image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, box_ind_)
+ end
+ begin
+ tf.add_input(desc, crop_size_)
+ end
+ end
+ begin
+ begin
+ if method !== nothing
+ desc["method"] = Base.String(method)
+ end
+ end
+ begin
+ if extrapolation_value !== nothing
+ desc["extrapolation_value"] = Base.identity(extrapolation_value)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing)
+ desc = tf.EagerOp("CropAndResize")
+ image_ = convert(tf.EagerTensor, image_)
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ box_ind_ = convert(tf.EagerTensor, box_ind_)
+ crop_size_ = convert(tf.EagerTensor, crop_size_)
+ begin
+ begin
+ tf.add_input(desc, image_)
+ end
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, box_ind_)
+ end
+ begin
+ tf.add_input(desc, crop_size_)
+ end
+ end
+ begin
+ begin
+ if method !== nothing
+ desc["method"] = Base.String(method)
+ end
+ end
+ begin
+ if extrapolation_value !== nothing
+ desc["extrapolation_value"] = Base.identity(extrapolation_value)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], name=nothing, method=nothing, extrapolation_value=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing)
+ if tf.in_eager_mode()
+ crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value)
+ else
+ crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value)
+ end
+ end
+ end
+end
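+# Editor's note: a usage sketch for `crop_and_resize` (not generator output).
+# Boxes are normalized [y1, x1, y2, x2] coordinates; `box_ind` is forwarded
+# unchanged, so it is 0-based. Shapes below are illustrative:
+#
+#     img = constant(randn(Float32, 1, 64, 64, 3))    # NHWC batch of one image
+#     boxes = constant(Float32[0 0 0.5 0.5])          # top-left quadrant
+#     crop_and_resize(img, boxes, constant(Int32[0]), constant(Int32[32, 32]))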
+
+
+"""
+ map_unstage_no_key(indices; capacity=0, memory_limit=0, container=, shared_name=)
+
+Removes and returns a random (key, value) pair from the underlying container.
+"""
+begin
+ begin
+ function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "MapUnstageNoKey") do
+ desc = tf.NodeDescription("MapUnstageNoKey")
+ begin
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("MapUnstageNoKey")
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_nd_sub(ref, indices, updates; use_locking=false)
+
+Applies sparse subtraction to individual values or slices in a variable reference.
+"""
+begin
+ begin
+ function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterNdSub") do
+ desc = tf.NodeDescription("ScatterNdSub")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterNdSub")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ resize_bilinear(images, size; align_corners=false)
+
+Resizes images to a given size using bilinear interpolation.
+"""
+begin
+ begin
+ function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeBilinear") do
+ desc = tf.NodeDescription("ResizeBilinear")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
+ end
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ begin
+ (images_,) = tf.tf_promote(images_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeBilinear")
+ images_ = convert(tf.EagerTensor, images_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners)
+ else
+ resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
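+# Editor's note: a usage sketch for `resize_bilinear` (not generator output).
+# Resizes an NHWC batch of images to the given [height, width]:
+#
+#     imgs = constant(randn(Float32, 1, 28, 28, 1))
+#     resize_bilinear(imgs, constant(Int32[56, 56]))    # -> shape (1, 56, 56, 1)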
+
+
+"""
+ ordered_map_peek(key, indices; capacity=0, memory_limit=0, container=, shared_name=)
+
+Peeks at the values at the specified key without removing them.
+"""
+begin
+ begin
+ function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "OrderedMapPeek") do
+ desc = tf.NodeDescription("OrderedMapPeek")
+ begin
+ begin
+ key_ = convert(Tensor{Int64}, key_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("OrderedMapPeek")
+ key_ = convert(tf.EagerTensor, key_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, key_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array(size; dynamic_size=false, clear_after_read=true, tensor_array_name=, element_shape=?)
+
+An array of Tensors of given size.
+"""
+begin
+ begin
+ function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArray") do
+ desc = tf.NodeDescription("TensorArray")
+ begin
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if dynamic_size !== nothing
+ desc["dynamic_size"] = Base.Bool(dynamic_size)
+ end
+ end
+ begin
+ if clear_after_read !== nothing
+ desc["clear_after_read"] = Base.Bool(clear_after_read)
+ end
+ end
+ begin
+ if tensor_array_name !== nothing
+ desc["tensor_array_name"] = Base.String(tensor_array_name)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing)
+ desc = tf.EagerOp("TensorArray")
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if dynamic_size !== nothing
+ desc["dynamic_size"] = Base.Bool(dynamic_size)
+ end
+ end
+ begin
+ if clear_after_read !== nothing
+ desc["clear_after_read"] = Base.Bool(clear_after_read)
+ end
+ end
+ begin
+ if tensor_array_name !== nothing
+ desc["tensor_array_name"] = Base.String(tensor_array_name)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing)
+ if tf.in_eager_mode()
+ tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape)
+ else
+ tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ inplace_sub(x, i, v)
+
+Subtracts `v` from specified rows of `x`.
+"""
+begin
+ begin
+ function inplace_sub_graph(x_, i_, v_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InplaceSub") do
+ desc = tf.NodeDescription("InplaceSub")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ i_ = convert(Tensor{Int32}, i_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ (x_, v_) = tf.tf_promote(x_, v_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, i_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function inplace_sub_eager(x_, i_, v_; name=nothing)
+ desc = tf.EagerOp("InplaceSub")
+ x_ = convert(tf.EagerTensor, x_)
+ i_ = convert(tf.EagerTensor, i_)
+ v_ = convert(tf.EagerTensor, v_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, i_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_sub(x_, i_, v_; name=nothing)
+ if tf.in_eager_mode()
+ inplace_sub_eager(x_, i_, v_; name=name)
+ else
+ inplace_sub_graph(x_, i_, v_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ pow(x, y)
+
+Computes the power of one value to another.
+"""
+begin
+ begin
+ function pow_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Pow") do
+ desc = tf.NodeDescription("Pow")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function pow_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Pow")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(pow, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pow(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ pow_eager(x_, y_; name=name)
+ else
+ pow_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
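+# Editor's note: a usage sketch for `pow` (not generator output), element-wise
+# exponentiation assuming eager mode:
+#
+#     pow(constant(2.0), constant(10.0))    # -> 1024.0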
+
+
+"""
+ ref_next_iteration(data)
+
+Makes its input available to the next iteration (ref variant).
+"""
+begin
+ begin
+ function ref_next_iteration_graph(data_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RefNextIteration") do
+ desc = tf.NodeDescription("RefNextIteration")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function ref_next_iteration_eager(data_; name=nothing)
+ desc = tf.EagerOp("RefNextIteration")
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(ref_next_iteration, [data_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_next_iteration(data_; name=nothing)
+ if tf.in_eager_mode()
+ ref_next_iteration_eager(data_; name=name)
+ else
+ ref_next_iteration_graph(data_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ scalar_summary(tags, values)
+
+Outputs a Summary protocol buffer with scalar values.
+"""
+begin
+ begin
+ function scalar_summary_graph(tags_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ScalarSummary") do
+ desc = tf.NodeDescription("ScalarSummary")
+ begin
+ begin
+ tags_ = convert(Tensor{String}, tags_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tags_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scalar_summary_eager(tags_, values_; name=nothing)
+ desc = tf.EagerOp("ScalarSummary")
+ tags_ = convert(tf.EagerTensor, tags_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, tags_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scalar_summary(tags_, values_; name=nothing)
+ if tf.in_eager_mode()
+ scalar_summary_eager(tags_, values_; name=name)
+ else
+ scalar_summary_graph(tags_, values_; name=name)
+ end
+ end
+ end
+end
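+# Editor's note: a usage sketch for `scalar_summary` (not generator output).
+# Typically built in graph mode and written by a summary writer; the tag and
+# value below are illustrative:
+#
+#     s = scalar_summary(constant("loss"), constant(0.37))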
+
+
+"""
+ string_split_v2(input, sep; maxsplit=-1)
+
+Splits elements of `input` based on `sep` into a sparse tensor.
+"""
+begin
+ begin
+ function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing)
+ local desc
+ tf.with_op_name(name, "StringSplitV2") do
+ desc = tf.NodeDescription("StringSplitV2")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ begin
+ sep_ = convert(Tensor{String}, sep_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, sep_)
+ end
+ end
+ begin
+ begin
+ if maxsplit !== nothing
+ desc["maxsplit"] = Base.Int(maxsplit)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing)
+ desc = tf.EagerOp("StringSplitV2")
+ input_ = convert(tf.EagerTensor, input_)
+ sep_ = convert(tf.EagerTensor, sep_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, sep_)
+ end
+ end
+ begin
+ begin
+ if maxsplit !== nothing
+ desc["maxsplit"] = Base.Int(maxsplit)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing)
+ if tf.in_eager_mode()
+ string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit)
+ else
+ string_split_v2_graph(input_, sep_; name=name, maxsplit=maxsplit)
+ end
+ end
+ end
+end
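+# Editor's note: a usage sketch for `string_split_v2` (not generator output).
+# The op returns the (indices, values, shape) components of a sparse result:
+#
+#     indices, values, shape = string_split_v2(constant(["a b c"]), constant(" "))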
+
+
+"""
+ bessel_i0e(x)
+
+Computes the Bessel i0e function of `x` element-wise.
+"""
+begin
+ begin
+ function bessel_i0e_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BesselI0e") do
+ desc = tf.NodeDescription("BesselI0e")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bessel_i0e_eager(x_; name=nothing)
+ desc = tf.EagerOp("BesselI0e")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bessel_i0e, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i0e(x_; name=nothing)
+ if tf.in_eager_mode()
+ bessel_i0e_eager(x_; name=name)
+ else
+ bessel_i0e_graph(x_; name=name)
+ end
+ end
+ end
+end
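+# Editor's note: a usage sketch for `bessel_i0e` (not generator output);
+# i0e(x) = exp(-|x|) * i0(x), so i0e(0) == 1:
+#
+#     bessel_i0e(constant(0.0))    # -> 1.0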
+
+
+"""
+ unique(x; out_idx=Int32)
+
+Finds the unique elements in a 1-D tensor.
+"""
+begin
+ begin
+ function unique_graph(x_; name=nothing, out_idx=nothing)
+ local desc
+ tf.with_op_name(name, "Unique") do
+ desc = tf.NodeDescription("Unique")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function unique_eager(x_; name=nothing, out_idx=nothing)
+ desc = tf.EagerOp("Unique")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if out_idx !== nothing
+ desc["out_idx"] = Base.identity(out_idx)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unique, [x_], name=nothing, out_idx=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique(x_; name=nothing, out_idx=nothing)
+ if tf.in_eager_mode()
+ unique_eager(x_; name=name, out_idx=out_idx)
+ else
+ unique_graph(x_; name=name, out_idx=out_idx)
+ end
+ end
+ end
+end
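+# Editor's note: a usage sketch for `unique` (not generator output). Returns
+# the unique values and, per input element, the index of its unique value;
+# the index tensor is forwarded from TensorFlow unchanged, so it is 0-based:
+#
+#     y, idx = unique(constant([1, 1, 2, 4, 4]))    # y -> [1, 2, 4]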
+
+
+"""
+ next_iteration(data)
+
+Makes its input available to the next iteration.
+"""
+begin
+ begin
+ function next_iteration_graph(data_; name=nothing)
+ local desc
+ tf.with_op_name(name, "NextIteration") do
+ desc = tf.NodeDescription("NextIteration")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function next_iteration_eager(data_; name=nothing)
+ desc = tf.EagerOp("NextIteration")
+ data_ = convert(tf.EagerTensor, data_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(next_iteration, [data_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function next_iteration(data_; name=nothing)
+ if tf.in_eager_mode()
+ next_iteration_eager(data_; name=name)
+ else
+ next_iteration_graph(data_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ load_tpu_embedding_rms_prop_parameters(parameters, ms, mom; table_id=-1, table_name=)
+
+Load embedding parameters for a single table.
+"""
+begin
+ begin
+ function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do
+ desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters")
+ begin
+ begin
+ parameters_ = convert(Tensor{Float32}, parameters_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Float32}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Float32}, mom_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters")
+ parameters_ = convert(tf.EagerTensor, parameters_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ begin
+ begin
+ tf.add_input(desc, parameters_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ eager_py_func(input)
+
+Eagerly executes a Python function to compute a value.
+"""
+begin
+ begin
+ function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ local desc
+ tf.with_op_name(name, "EagerPyFunc") do
+ desc = tf.NodeDescription("EagerPyFunc")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if token !== nothing
+ desc["token"] = Base.String(token)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ desc = tf.EagerOp("EagerPyFunc")
+ input_ = [convert(tf.EagerTensor, x) for x = input_] # list input: convert each element (mirrors the graph builder above)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if token !== nothing
+ desc["token"] = Base.String(token)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ if tf.in_eager_mode()
+ eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout)
+ else
+ eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout)
+ end
+ end
+ end
+end
+
+
+"""
+ whole_file_reader_v2(; container=, shared_name=)
+
+A Reader that outputs the entire contents of a file as a value.
+"""
+begin
+ begin
+ function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "WholeFileReaderV2") do
+ desc = tf.NodeDescription("WholeFileReaderV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("WholeFileReaderV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name)
+ else
+ whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_scatter_sub(tensor, indices, updates)
+
+Subtracts sparse `updates` from an existing tensor according to `indices`.
+"""
+begin
+ begin
+ function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorScatterSub") do
+ desc = tf.NodeDescription("TensorScatterSub")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (tensor_, updates_) = tf.tf_promote(tensor_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing)
+ desc = tf.EagerOp("TensorScatterSub")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name)
+ else
+ tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name)
+ end
+ end
+ end
+end
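+# Editor's note: a usage sketch for `tensor_scatter_sub` (not generator
+# output). The wrapper shifts `indices` down by one above, so they are 1-based
+# here, as is idiomatic in Julia:
+#
+#     t = constant([1, 2, 3, 4])
+#     idx = constant(reshape([1, 3], 2, 1))            # rows 1 and 3
+#     tensor_scatter_sub(t, idx, constant([10, 10]))   # -> [-9, 2, -7, 4]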
+
+
+"""
+ scatter_max(ref, indices, updates; use_locking=false)
+
+Reduces sparse updates into a variable reference using the max operation.
+"""
+begin
+ begin
+ function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterMax") do
+ desc = tf.NodeDescription("ScatterMax")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterMax")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ sqrt(x)
+
+Computes the square root of `x` element-wise.
+"""
+begin
+ begin
+ function sqrt_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Sqrt") do
+ desc = tf.NodeDescription("Sqrt")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sqrt_eager(x_; name=nothing)
+ desc = tf.EagerOp("Sqrt")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sqrt, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt(x_; name=nothing)
+ if tf.in_eager_mode()
+ sqrt_eager(x_; name=name)
+ else
+ sqrt_graph(x_; name=name)
+ end
+ end
+ end
+end
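+# Editor's note: a usage sketch for `sqrt` (not generator output), the
+# element-wise square root assuming eager mode:
+#
+#     sqrt(constant(4.0))    # -> 2.0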
+
+
+"""
+ accumulator_take_gradient(handle, num_required)
+
+Extracts the average gradient in the given ConditionalAccumulator.
+"""
+begin
+ begin
+ function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "AccumulatorTakeGradient") do
+ desc = tf.NodeDescription("AccumulatorTakeGradient")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ num_required_ = convert(Tensor{Int32}, num_required_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, num_required_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("AccumulatorTakeGradient")
+ handle_ = convert(tf.EagerTensor, handle_)
+ num_required_ = convert(tf.EagerTensor, num_required_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, num_required_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype)
+ else
+ accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ _mkl_add(x, y, mkl_x, mkl_y)
+
+Returns x + y element-wise.
+"""
+begin
+ begin
+ function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_MklAdd") do
+ desc = tf.NodeDescription("_MklAdd")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ mkl_x_ = convert(Tensor{UInt8}, mkl_x_)
+ begin
+ end
+ end
+ begin
+ mkl_y_ = convert(Tensor{UInt8}, mkl_y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ desc = tf.EagerOp("_MklAdd")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ mkl_x_ = convert(tf.EagerTensor, mkl_x_)
+ mkl_y_ = convert(tf.EagerTensor, mkl_y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ if tf.in_eager_mode()
+ _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name)
+ else
+ _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ outfeed_enqueue_tuple(inputs)
+
+An op which emits multiple Tensor values from an XLA computation.
+"""
+begin
+ begin
+ function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing)
+ local desc
+ tf.with_op_name(name, "OutfeedEnqueueTuple") do
+ desc = tf.NodeDescription("OutfeedEnqueueTuple")
+ begin
+ begin
+ inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing)
+ desc = tf.EagerOp("OutfeedEnqueueTuple")
+ inputs_ = [convert(tf.EagerTensor, x) for x = inputs_] # list input: convert each element (mirrors the graph builder above)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ end
+ begin
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing)
+ if tf.in_eager_mode()
+ outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes)
+ else
+ outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes)
+ end
+ end
+ end
+end
+
+
+"""
+ reciprocal(x)
+
+Computes the reciprocal of `x` element-wise.
+"""
+begin
+ begin
+ function reciprocal_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Reciprocal") do
+ desc = tf.NodeDescription("Reciprocal")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reciprocal_eager(x_; name=nothing)
+ desc = tf.EagerOp("Reciprocal")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reciprocal, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal(x_; name=nothing)
+ if tf.in_eager_mode()
+ reciprocal_eager(x_; name=name)
+ else
+ reciprocal_graph(x_; name=name)
+ end
+ end
+ end
+end
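+# Editor's note: a usage sketch for `reciprocal` (not generator output),
+# element-wise 1 / x assuming eager mode:
+#
+#     reciprocal(constant([2.0, 4.0]))    # -> [0.5, 0.25]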
+
+
+"""
+ string_strip(input)
+
+Strips leading and trailing whitespace from each string in the input tensor.
+"""
+begin
+ begin
+ function string_strip_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "StringStrip") do
+ desc = tf.NodeDescription("StringStrip")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_strip_eager(input_; name=nothing)
+ desc = tf.EagerOp("StringStrip")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_strip, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_strip(input_; name=nothing)
+ if tf.in_eager_mode()
+ string_strip_eager(input_; name=name)
+ else
+ string_strip_graph(input_; name=name)
+ end
+ end
+ end
+end
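+# Editor's note: a usage sketch for `string_strip` (not generator output),
+# removing leading and trailing whitespace from each element:
+#
+#     string_strip(constant(["  hello  ", "world "]))    # -> ["hello", "world"]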
+
+
+"""
+ barrier_ready_size(handle)
+
+Computes the number of complete elements in the given barrier.
+"""
+begin
+ begin
+ function barrier_ready_size_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BarrierReadySize") do
+ desc = tf.NodeDescription("BarrierReadySize")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function barrier_ready_size_eager(handle_; name=nothing)
+ desc = tf.EagerOp("BarrierReadySize")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_ready_size(handle_; name=nothing)
+ if tf.in_eager_mode()
+ barrier_ready_size_eager(handle_; name=name)
+ else
+ barrier_ready_size_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ fake_quant_with_min_max_vars_per_channel(inputs, min, max; num_bits=8, narrow_range=false)
+
+Fake-quantizes the `inputs` tensor of type float per channel.
+"""
+begin
+ begin
+ function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ local desc
+ tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do
+ desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel")
+ begin
+ begin
+ inputs_ = convert(Tensor{Float32}, inputs_)
+ begin
+ end
+ end
+ begin
+ min_ = convert(Tensor{Float32}, min_)
+ begin
+ end
+ end
+ begin
+ max_ = convert(Tensor{Float32}, max_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel")
+ inputs_ = convert(tf.EagerTensor, inputs_)
+ min_ = convert(tf.EagerTensor, min_)
+ max_ = convert(tf.EagerTensor, max_)
+ begin
+ begin
+ tf.add_input(desc, inputs_)
+ end
+ begin
+ tf.add_input(desc, min_)
+ end
+ begin
+ tf.add_input(desc, max_)
+ end
+ end
+ begin
+ begin
+ if num_bits !== nothing
+ desc["num_bits"] = Base.Int(num_bits)
+ end
+ end
+ begin
+ if narrow_range !== nothing
+ desc["narrow_range"] = Base.Bool(narrow_range)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+ if tf.in_eager_mode()
+ fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ else
+ fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+ end
+ end
+ end
+end
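+
+# A minimal usage sketch (illustrative, not generated), assuming eager mode via
+# `enable_eager_execution()`. `min`/`max` carry one clamp range per channel of
+# the last dimension:
+#
+#   x = constant(randn(Float32, 4, 3))
+#   lo = constant(Float32[-1, -1, -1]); hi = constant(Float32[1, 1, 1])
+#   Ops.fake_quant_with_min_max_vars_per_channel(x, lo, hi; num_bits=8)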
+
+
+"""
+ string_to_hash_bucket(string_tensor)
+
+
+"""
+begin
+ begin
+ function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing)
+ local desc
+ tf.with_op_name(name, "StringToHashBucket") do
+ desc = tf.NodeDescription("StringToHashBucket")
+ begin
+ begin
+ string_tensor_ = convert(Tensor{String}, string_tensor_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, string_tensor_)
+ end
+ end
+ begin
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing)
+ desc = tf.EagerOp("StringToHashBucket")
+ string_tensor_ = convert(tf.EagerTensor, string_tensor_)
+ begin
+ begin
+ tf.add_input(desc, string_tensor_)
+ end
+ end
+ begin
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing)
+ if tf.in_eager_mode()
+ string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets)
+ else
+ string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets)
+ end
+ end
+ end
+end
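+
+# A minimal usage sketch (illustrative, not generated), assuming eager mode:
+#
+#   s = constant(["apple", "banana"])
+#   Ops.string_to_hash_bucket(s; num_buckets=100)
+#   # -> deterministic bucket ids in 0:99, as returned by the kernel (no index shift applied)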
+
+
+"""
+ tensor_array_concat(handle, flow_in; element_shape_except0=?)
+
+
+"""
+begin
+ begin
+ function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayConcat") do
+ desc = tf.NodeDescription("TensorArrayConcat")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape_except0 !== nothing
+ desc["element_shape_except0"] = Base.identity(element_shape_except0)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ desc = tf.EagerOp("TensorArrayConcat")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape_except0 !== nothing
+ desc["element_shape_except0"] = Base.identity(element_shape_except0)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+ if tf.in_eager_mode()
+ tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0)
+ else
+ tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0)
+ end
+ end
+ end
+end
+
+
+"""
+ sharded_filename(basename, shard, num_shards)
+
+
+"""
+begin
+ begin
+ function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ShardedFilename") do
+ desc = tf.NodeDescription("ShardedFilename")
+ begin
+ begin
+ basename_ = convert(Tensor{String}, basename_)
+ begin
+ end
+ end
+ begin
+ shard_ = convert(Tensor{Int32}, shard_)
+ begin
+ end
+ end
+ begin
+ num_shards_ = convert(Tensor{Int32}, num_shards_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, basename_)
+ end
+ begin
+ tf.add_input(desc, shard_)
+ end
+ begin
+ tf.add_input(desc, num_shards_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing)
+ desc = tf.EagerOp("ShardedFilename")
+ basename_ = convert(tf.EagerTensor, basename_)
+ shard_ = convert(tf.EagerTensor, shard_)
+ num_shards_ = convert(tf.EagerTensor, num_shards_)
+ begin
+ begin
+ tf.add_input(desc, basename_)
+ end
+ begin
+ tf.add_input(desc, shard_)
+ end
+ begin
+ tf.add_input(desc, num_shards_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filename(basename_, shard_, num_shards_; name=nothing)
+ if tf.in_eager_mode()
+ sharded_filename_eager(basename_, shard_, num_shards_; name=name)
+ else
+ sharded_filename_graph(basename_, shard_, num_shards_; name=name)
+ end
+ end
+ end
+end
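+
+# A minimal usage sketch (illustrative, not generated), assuming eager mode:
+#
+#   Ops.sharded_filename(constant("ckpt"), constant(Int32(2)), constant(Int32(10)))
+#   # -> "ckpt-00002-of-00010"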
+
+
+"""
+ py_func(input)
+
+
+"""
+begin
+ begin
+ function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ local desc
+ tf.with_op_name(name, "PyFunc") do
+ desc = tf.NodeDescription("PyFunc")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if token !== nothing
+ desc["token"] = Base.String(token)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ desc = tf.EagerOp("PyFunc")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if token !== nothing
+ desc["token"] = Base.String(token)
+ end
+ end
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing)
+ if tf.in_eager_mode()
+ py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout)
+ else
+ py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout)
+ end
+ end
+ end
+end
+
+
+"""
+ unsorted_segment_prod(data, segment_ids, num_segments)
+
+
+"""
+begin
+ begin
+ function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnsortedSegmentProd") do
+ desc = tf.NodeDescription("UnsortedSegmentProd")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("UnsortedSegmentProd")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name)
+ else
+ unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
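+
+# A minimal usage sketch (illustrative, not generated). Note that the wrapper
+# shifts `segment_ids` down by one, so segment ids are 1-based on the Julia side:
+#
+#   data = constant([2.0, 3.0, 4.0, 5.0])
+#   ids  = constant([1, 1, 2, 2])  # two segments
+#   Ops.unsorted_segment_prod(data, ids, constant(Int32(2)))  # -> [6.0, 20.0]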
+
+
+"""
+ count_up_to(ref)
+
+
+"""
+begin
+ begin
+ function count_up_to_graph(ref_; name=nothing, limit=nothing)
+ local desc
+ tf.with_op_name(name, "CountUpTo") do
+ desc = tf.NodeDescription("CountUpTo")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ (ref_,) = tf.tf_promote(ref_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ end
+ begin
+ begin
+ if limit !== nothing
+ desc["limit"] = Base.Int(limit)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function count_up_to_eager(ref_; name=nothing, limit=nothing)
+ desc = tf.EagerOp("CountUpTo")
+ ref_ = convert(tf.EagerTensor, ref_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ end
+ begin
+ begin
+ if limit !== nothing
+ desc["limit"] = Base.Int(limit)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function count_up_to(ref_; name=nothing, limit=nothing)
+ if tf.in_eager_mode()
+ count_up_to_eager(ref_; name=name, limit=limit)
+ else
+ count_up_to_graph(ref_; name=name, limit=limit)
+ end
+ end
+ end
+end
+
+
+"""
+ random_gamma(shape, alpha; seed=0, seed2=0)
+
+
+"""
+begin
+ begin
+ function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing)
+ local desc
+ tf.with_op_name(name, "RandomGamma") do
+ desc = tf.NodeDescription("RandomGamma")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (alpha_,) = tf.tf_promote(alpha_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing)
+ desc = tf.EagerOp("RandomGamma")
+ shape_ = convert(tf.EagerTensor, shape_)
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if S !== nothing
+ desc["S"] = Base.identity(S)
+ end
+ end
+ end
+ begin
+ desc["S"] = tf.data_type(shape_)
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing)
+ if tf.in_eager_mode()
+ random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S)
+ else
+ random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S)
+ end
+ end
+ end
+end
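+
+# A minimal usage sketch (illustrative, not generated): draws Gamma(alpha, 1)
+# samples with the requested leading `shape`, assuming eager mode:
+#
+#   Ops.random_gamma(constant(Int32[5]), constant(2.0); seed=42)
+#   # -> 5 draws from Gamma(2, 1)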
+
+
+"""
+ tensor_array_grad(handle, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayGrad") do
+ desc = tf.NodeDescription("TensorArrayGrad")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing)
+ desc = tf.EagerOp("TensorArrayGrad")
+ handle_ = convert(tf.EagerTensor, handle_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if source !== nothing
+ desc["source"] = Base.String(source)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing)
+ if tf.in_eager_mode()
+ tensor_array_grad_eager(handle_, flow_in_; name=name, source=source)
+ else
+ tensor_array_grad_graph(handle_, flow_in_; name=name, source=source)
+ end
+ end
+ end
+end
+
+
+"""
+ dilation2d(input, filter)
+
+
+"""
+begin
+ begin
+ function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "Dilation2D") do
+ desc = tf.NodeDescription("Dilation2D")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_) = tf.tf_promote(input_, filter_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ desc = tf.EagerOp("Dilation2D")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if rates !== nothing
+ desc["rates"] = map(Base.identity, rates)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding)
+ else
+ dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding)
+ end
+ end
+ end
+end
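+
+# A minimal usage sketch (illustrative, not generated): morphological dilation
+# of a 4-D NHWC `input` by a 3-D HWC `filter`, assuming eager mode:
+#
+#   x = constant(randn(Float32, 1, 8, 8, 1))   # NHWC
+#   f = constant(randn(Float32, 3, 3, 1))      # HWC
+#   Ops.dilation2d(x, f; strides=[1,1,1,1], rates=[1,1,1,1], padding="SAME")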
+
+
+"""
+ unbatch(batched_tensor, batch_index, id; container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "Unbatch") do
+ desc = tf.NodeDescription("Unbatch")
+ begin
+ begin
+ batched_tensor_ = convert(Tensor{Any}, batched_tensor_)
+ begin
+ end
+ end
+ begin
+ batch_index_ = convert(Tensor{Int64}, batch_index_)
+ begin
+ end
+ end
+ begin
+ id_ = convert(Tensor{Int64}, id_)
+ begin
+ end
+ end
+ begin
+ (batched_tensor_,) = tf.tf_promote(batched_tensor_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, batched_tensor_)
+ end
+ begin
+ tf.add_input(desc, batch_index_)
+ end
+ begin
+ tf.add_input(desc, id_)
+ end
+ end
+ begin
+ begin
+ if timeout_micros !== nothing
+ desc["timeout_micros"] = Base.Int(timeout_micros)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unbatch_eager(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("Unbatch")
+ batched_tensor_ = convert(tf.EagerTensor, batched_tensor_)
+ batch_index_ = convert(tf.EagerTensor, batch_index_)
+ id_ = convert(tf.EagerTensor, id_)
+ begin
+ begin
+ tf.add_input(desc, batched_tensor_)
+ end
+ begin
+ tf.add_input(desc, batch_index_)
+ end
+ begin
+ tf.add_input(desc, id_)
+ end
+ end
+ begin
+ begin
+ if timeout_micros !== nothing
+ desc["timeout_micros"] = Base.Int(timeout_micros)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(batched_tensor_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name)
+ else
+ unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ get_session_handle(value)
+
+
+"""
+begin
+ begin
+ function get_session_handle_graph(value_; name=nothing)
+ local desc
+ tf.with_op_name(name, "GetSessionHandle") do
+ desc = tf.NodeDescription("GetSessionHandle")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function get_session_handle_eager(value_; name=nothing)
+ desc = tf.EagerOp("GetSessionHandle")
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(get_session_handle, [value_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle(value_; name=nothing)
+ if tf.in_eager_mode()
+ get_session_handle_eager(value_; name=name)
+ else
+ get_session_handle_graph(value_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ retrieve_tpu_embedding_adam_parameters(; table_id=-1, table_name=)
+
+Retrieve embedding parameters for a single table.
+"""
+begin
+ begin
+ function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adam_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParameters")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
+
+"""
+ mutable_hash_table_of_tensors_v2(; container=, shared_name=, use_node_name_sharing=false, value_shape=?)
+
+
+"""
+begin
+ begin
+ function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing)
+ local desc
+ tf.with_op_name(name, "MutableHashTableOfTensorsV2") do
+ desc = tf.NodeDescription("MutableHashTableOfTensorsV2")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mutable_hash_table_of_tensors_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing)
+ desc = tf.EagerOp("MutableHashTableOfTensorsV2")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if use_node_name_sharing !== nothing
+ desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing)
+ end
+ end
+ begin
+ if key_dtype !== nothing
+ desc["key_dtype"] = Base.identity(key_dtype)
+ end
+ end
+ begin
+ if value_dtype !== nothing
+ desc["value_dtype"] = Base.identity(value_dtype)
+ end
+ end
+ begin
+ if value_shape !== nothing
+ desc["value_shape"] = Base.identity(value_shape)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing)
+ if tf.in_eager_mode()
+ mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape)
+ else
+ mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power; use_locking=false)
+
+
+"""
+begin
+ begin
+ function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyFtrl") do
+ desc = tf.NodeDescription("SparseApplyFtrl")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ linear_ = convert(Tensor{Any}, linear_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ lr_power_ = convert(Tensor{Any}, lr_power_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyFtrl")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ linear_ = convert(tf.EagerTensor, linear_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ lr_power_ = convert(tf.EagerTensor, lr_power_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, linear_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, lr_power_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(linear_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_power_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_dataset_v2(input_dataset, batch_size, drop_remainder)
+
+
+"""
+begin
+ begin
+ function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "BatchDatasetV2") do
+ desc = tf.NodeDescription("BatchDatasetV2")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ batch_size_ = convert(Tensor{Int64}, batch_size_)
+ begin
+ end
+ end
+ begin
+ drop_remainder_ = convert(Tensor{Bool}, drop_remainder_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("BatchDatasetV2")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ batch_size_ = convert(tf.EagerTensor, batch_size_)
+ drop_remainder_ = convert(tf.EagerTensor, drop_remainder_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, batch_size_)
+ end
+ begin
+ tf.add_input(desc, drop_remainder_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_sparse_minimum(a_indices, a_values, a_shape, b_indices, b_values, b_shape)
+
+
+"""
+begin
+ begin
+ function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSparseMinimum") do
+ desc = tf.NodeDescription("SparseSparseMinimum")
+ begin
+ begin
+ a_indices_ = convert(Tensor{Int64}, a_indices_)
+ begin
+ end
+ end
+ begin
+ a_values_ = convert(Tensor{Any}, a_values_)
+ begin
+ end
+ end
+ begin
+ a_shape_ = convert(Tensor{Int64}, a_shape_)
+ begin
+ end
+ end
+ begin
+ b_indices_ = convert(Tensor{Int64}, b_indices_)
+ begin
+ end
+ end
+ begin
+ b_values_ = convert(Tensor{Any}, b_values_)
+ begin
+ end
+ end
+ begin
+ b_shape_ = convert(Tensor{Int64}, b_shape_)
+ begin
+ end
+ end
+ begin
+ (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, b_values_)
+ end
+ begin
+ tf.add_input(desc, b_shape_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing)
+ desc = tf.EagerOp("SparseSparseMinimum")
+ a_indices_ = convert(tf.EagerTensor, a_indices_)
+ a_values_ = convert(tf.EagerTensor, a_values_)
+ a_shape_ = convert(tf.EagerTensor, a_shape_)
+ b_indices_ = convert(tf.EagerTensor, b_indices_)
+ b_values_ = convert(tf.EagerTensor, b_values_)
+ b_shape_ = convert(tf.EagerTensor, b_shape_)
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, b_values_)
+ end
+ begin
+ tf.add_input(desc, b_shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name)
+ else
+ sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name)
+ end
+ end
+ end
+end
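+
+# A minimal usage sketch (illustrative, not generated): element-wise minimum of
+# two same-shape SparseTensors in COO form, returning output indices and values.
+# The wrapper applies no index shift here, so indices are raw 0-based COO:
+#
+#   a_ix = constant(Int64[0 0; 1 1]); a_v = constant([1.0, 5.0])
+#   b_ix = constant(Int64[0 0; 1 1]); b_v = constant([3.0, 2.0])
+#   shp  = constant(Int64[2, 2])
+#   Ops.sparse_sparse_minimum(a_ix, a_v, shp, b_ix, b_v, shp)  # values -> [1.0, 2.0]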
+
+
+"""
+ reverse_v2(tensor, axis)
+
+
+"""
+begin
+ begin
+ function reverse_v2_graph(tensor_, axis_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReverseV2") do
+ desc = tf.NodeDescription("ReverseV2")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ axis_ = convert(Tensor{Int32}, axis_)
+ begin
+ axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ begin
+ (axis_,) = tf.tf_promote(axis_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function reverse_v2_eager(tensor_, axis_; name=nothing)
+ desc = tf.EagerOp("ReverseV2")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ axis_ = convert(tf.EagerTensor, axis_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, axis_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(axis_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_v2(tensor_, axis_; name=nothing)
+ if tf.in_eager_mode()
+ reverse_v2_eager(tensor_, axis_; name=name)
+ else
+ reverse_v2_graph(tensor_, axis_; name=name)
+ end
+ end
+ end
+end
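+
+# A minimal usage sketch (illustrative, not generated). The wrapper shifts
+# `axis` down by one, so dimensions are 1-based on the Julia side:
+#
+#   t = constant([1 2; 3 4])
+#   Ops.reverse_v2(t, constant(Int32[1]))  # reverse along dim 1 -> [3 4; 1 2]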
+
+
+"""
+ strided_slice(input, begin, end, strides; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0)
+
+
+"""
+begin
+ begin
+ function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ local desc
+ tf.with_op_name(name, "StridedSlice") do
+ desc = tf.NodeDescription("StridedSlice")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ begin_ = convert(Tensor{Any}, begin_)
+ begin
+ begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
+ end
+ end
+ begin
+ end_ = convert(Tensor{Any}, end_)
+ begin
+ end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1)
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Any}, strides_)
+ begin
+ strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ desc = tf.EagerOp("StridedSlice")
+ input_ = convert(tf.EagerTensor, input_)
+ begin_ = convert(tf.EagerTensor, begin_)
+ end_ = convert(tf.EagerTensor, end_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Index"] = tf.data_type(begin_)
+ end
+ begin
+ desc["Index"] = tf.data_type(end_)
+ end
+ begin
+ desc["Index"] = tf.data_type(strides_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ if tf.in_eager_mode()
+ strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ else
+ strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ end
+ end
+ end
+end
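+
+# A minimal usage sketch (illustrative, not generated). The wrapper shifts
+# `begin`, `end`, and `strides` down by one before building the op, so the
+# endpoints are 1-based on the Julia side; note from the code above that the
+# same shift is applied to `strides` and to the mask keyword arguments, which
+# callers need to account for:
+#
+#   t = constant(collect(1:10))
+#   Ops.strided_slice(t, constant([2]), constant([6]), constant([2]))
+#   # -> t[2:5] (the `2` passed for strides becomes a unit stride after the shift)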
+
+
+"""
+ matching_files(pattern)
+
+
+"""
+begin
+ begin
+ function matching_files_graph(pattern_; name=nothing)
+ local desc
+ tf.with_op_name(name, "MatchingFiles") do
+ desc = tf.NodeDescription("MatchingFiles")
+ begin
+ begin
+ pattern_ = convert(Tensor{String}, pattern_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, pattern_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matching_files_eager(pattern_; name=nothing)
+ desc = tf.EagerOp("MatchingFiles")
+ pattern_ = convert(tf.EagerTensor, pattern_)
+ begin
+ begin
+ tf.add_input(desc, pattern_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matching_files, [pattern_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matching_files(pattern_; name=nothing)
+ if tf.in_eager_mode()
+ matching_files_eager(pattern_; name=name)
+ else
+ matching_files_graph(pattern_; name=name)
+ end
+ end
+ end
+end
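+
+# A minimal usage sketch (illustrative, not generated), assuming eager mode:
+#
+#   Ops.matching_files(constant("/tmp/logs/*.txt"))  # tensor of matching file names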
+
+
+"""
+ encode_base64(input; pad=false)
+
+
+"""
+begin
+ begin
+ function encode_base64_graph(input_; name=nothing, pad=nothing)
+ local desc
+ tf.with_op_name(name, "EncodeBase64") do
+ desc = tf.NodeDescription("EncodeBase64")
+ begin
+ begin
+ input_ = convert(Tensor{String}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if pad !== nothing
+ desc["pad"] = Base.Bool(pad)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function encode_base64_eager(input_; name=nothing, pad=nothing)
+ desc = tf.EagerOp("EncodeBase64")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if pad !== nothing
+ desc["pad"] = Base.Bool(pad)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_base64(input_; name=nothing, pad=nothing)
+ if tf.in_eager_mode()
+ encode_base64_eager(input_; name=name, pad=pad)
+ else
+ encode_base64_graph(input_; name=name, pad=pad)
+ end
+ end
+ end
+end
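+
+# A minimal usage sketch (illustrative, not generated): web-safe base64 uses
+# '-' and '_' in place of '+' and '/'. Assuming eager mode:
+#
+#   Ops.encode_base64(constant(["hello"]); pad=true)  # -> ["aGVsbG8="]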
+
+
+"""
+ iterator_get_next_as_optional(iterator)
+
+
+"""
+begin
+ begin
+ function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorGetNextAsOptional") do
+ desc = tf.NodeDescription("IteratorGetNextAsOptional")
+ begin
+ begin
+ iterator_ = convert(Tensor{Any}, iterator_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("IteratorGetNextAsOptional")
+ iterator_ = convert(tf.EagerTensor, iterator_)
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ padding_fifo_queue(; shapes=Int64[], capacity=-1, container=, shared_name=)
+
+
+"""
+begin
+ begin
+ function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "PaddingFIFOQueue") do
+ desc = tf.NodeDescription("PaddingFIFOQueue")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function padding_fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("PaddingFIFOQueue")
+ begin
+ end
+ begin
+ begin
+ if component_types !== nothing
+ desc["component_types"] = map(Base.identity, component_types)
+ end
+ end
+ begin
+ if shapes !== nothing
+ desc["shapes"] = map(Base.identity, shapes)
+ end
+ end
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ else
+ padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+ end
+ end
+ end
+end
+
+
+"""
+ iterator_to_string_handle(resource_handle)
+
+
+"""
+begin
+ begin
+ function iterator_to_string_handle_graph(resource_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorToStringHandle") do
+ desc = tf.NodeDescription("IteratorToStringHandle")
+ begin
+ begin
+ resource_handle_ = convert(Tensor{Any}, resource_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_to_string_handle_eager(resource_handle_; name=nothing)
+ desc = tf.EagerOp("IteratorToStringHandle")
+ resource_handle_ = convert(tf.EagerTensor, resource_handle_)
+ begin
+ begin
+ tf.add_input(desc, resource_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_to_string_handle(resource_handle_; name=nothing)
+ if tf.in_eager_mode()
+ iterator_to_string_handle_eager(resource_handle_; name=name)
+ else
+ iterator_to_string_handle_graph(resource_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ max_pool_grad_grad_with_argmax(input, grad, argmax)
+
+
+"""
+begin
+ begin
+ function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ local desc
+ tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do
+ desc = tf.NodeDescription("MaxPoolGradGradWithArgmax")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ argmax_ = convert(Tensor{Any}, argmax_)
+ begin
+ end
+ end
+ begin
+ (argmax_,) = tf.tf_promote(argmax_)
+ end
+ begin
+ (input_, grad_) = tf.tf_promote(input_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, argmax_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ desc = tf.EagerOp("MaxPoolGradGradWithArgmax")
+ input_ = convert(tf.EagerTensor, input_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ argmax_ = convert(tf.EagerTensor, argmax_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, argmax_)
+ end
+ end
+ begin
+ begin
+ if ksize !== nothing
+ desc["ksize"] = map(Base.identity, ksize)
+ end
+ end
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Targmax"] = tf.data_type(argmax_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing)
+ if tf.in_eager_mode()
+ max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding)
+ else
+ max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_gather(input_handle, indices)
+
+
+"""
+begin
+ begin
+ function tensor_list_gather_graph(input_handle_, indices_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListGather") do
+ desc = tf.NodeDescription("TensorListGather")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_gather_eager(input_handle_, indices_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListGather")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_gather(input_handle_, indices_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_gather_eager(input_handle_, indices_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_gather_graph(input_handle_, indices_; name=name, element_dtype=element_dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ multinomial(logits, num_samples; seed=0, seed2=0, output_dtype=Int64)
+
+
+"""
+begin
+ begin
+ function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "Multinomial") do
+ desc = tf.NodeDescription("Multinomial")
+ begin
+ begin
+ logits_ = convert(Tensor{Any}, logits_)
+ begin
+ end
+ end
+ begin
+ num_samples_ = convert(Tensor{Int32}, num_samples_)
+ begin
+ end
+ end
+ begin
+ (logits_,) = tf.tf_promote(logits_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ begin
+ tf.add_input(desc, num_samples_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if output_dtype !== nothing
+ desc["output_dtype"] = Base.identity(output_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing)
+ desc = tf.EagerOp("Multinomial")
+ logits_ = convert(tf.EagerTensor, logits_)
+ num_samples_ = convert(tf.EagerTensor, num_samples_)
+ begin
+ begin
+ tf.add_input(desc, logits_)
+ end
+ begin
+ tf.add_input(desc, num_samples_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if output_dtype !== nothing
+ desc["output_dtype"] = Base.identity(output_dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(logits_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing)
+ if tf.in_eager_mode()
+ multinomial_eager(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype)
+ else
+ multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype)
+ end
+ end
+ end
+end
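+
+# A minimal eager-mode sketch of the wrapper above, assuming eager execution is
+# enabled and the wrapper is reachable as `Ops.multinomial`. The returned samples
+# are raw 0-based class indices; this wrapper does not shift them:
+#
+#   logits = constant(log.([0.4 0.6]))    # 1×2 batch of class log-probabilities
+#   samples = Ops.multinomial(logits, 5; seed=42)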
+
+
+"""
+ tensor_array_read(handle, index, flow_in)
+
+Reads an element from the TensorArray into the output value.
+"""
+begin
+ begin
+ function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayRead") do
+ desc = tf.NodeDescription("TensorArrayRead")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Int32}, index_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("TensorArrayRead")
+ handle_ = convert(tf.EagerTensor, handle_)
+ index_ = convert(tf.EagerTensor, index_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype)
+ else
+ tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_indexed_dataset_get(materialized, index)
+
+
+"""
+begin
+ begin
+ function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do
+ desc = tf.NodeDescription("ExperimentalIndexedDatasetGet")
+ begin
+ begin
+ materialized_ = convert(Tensor{Any}, materialized_)
+ begin
+ end
+ end
+ begin
+ index_ = convert(Tensor{Any}, index_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, materialized_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalIndexedDatasetGet")
+ materialized_ = convert(tf.EagerTensor, materialized_)
+ index_ = convert(tf.EagerTensor, index_)
+ begin
+ begin
+ tf.add_input(desc, materialized_)
+ end
+ begin
+ tf.add_input(desc, index_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ iterator_from_string_handle_v2(string_handle; output_types=Int64[], output_shapes=Int64[])
+
+
+"""
+begin
+ begin
+ function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorFromStringHandleV2") do
+ desc = tf.NodeDescription("IteratorFromStringHandleV2")
+ begin
+ begin
+ string_handle_ = convert(Tensor{String}, string_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, string_handle_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("IteratorFromStringHandleV2")
+ string_handle_ = convert(tf.EagerTensor, string_handle_)
+ begin
+ begin
+ tf.add_input(desc, string_handle_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ bitwise_or(x, y)
+
+Elementwise computes the bitwise OR of x and y.
+"""
+begin
+ begin
+ function bitwise_or_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BitwiseOr") do
+ desc = tf.NodeDescription("BitwiseOr")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function bitwise_or_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("BitwiseOr")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_or(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ bitwise_or_eager(x_, y_; name=name)
+ else
+ bitwise_or_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
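+
+# A minimal sketch of the wrapper above, assuming it is reachable as
+# `Ops.bitwise_or`; both inputs are first promoted to a common integer type:
+#
+#   x = constant(Int32[1, 2, 4])
+#   y = constant(Int32[3, 3, 3])
+#   Ops.bitwise_or(x, y)    # elementwise: 1|3 = 3, 2|3 = 3, 4|3 = 7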
+
+
+"""
+ unsorted_segment_max(data, segment_ids, num_segments)
+
+Computes the maximum along segments of a tensor.
+"""
+begin
+ begin
+ function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing)
+ local desc
+ tf.with_op_name(name, "UnsortedSegmentMax") do
+ desc = tf.NodeDescription("UnsortedSegmentMax")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ num_segments_ = convert(Tensor{Int32}, num_segments_)
+ begin
+ end
+ end
+ begin
+ (num_segments_,) = tf.tf_promote(num_segments_)
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing)
+ desc = tf.EagerOp("UnsortedSegmentMax")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ num_segments_ = convert(tf.EagerTensor, num_segments_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ begin
+ tf.add_input(desc, num_segments_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ begin
+ desc["Tnumsegments"] = tf.data_type(num_segments_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing)
+ if tf.in_eager_mode()
+ unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name)
+ else
+ unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name)
+ end
+ end
+ end
+end
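+
+# A minimal sketch, assuming the wrapper is reachable as `Ops.unsorted_segment_max`.
+# Note that the graph wrapper above subtracts 1 from `segment_ids` (so callers pass
+# Julia-style 1-based ids), while the eager wrapper passes them through unchanged:
+#
+#   data        = constant([1.0, 5.0, 3.0, 2.0])
+#   segment_ids = constant(Int32[1, 1, 2, 2])    # 1-based segment ids (graph mode)
+#   Ops.unsorted_segment_max(data, segment_ids, constant(Int32(2)))    # → [5.0, 3.0]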
+
+
+"""
+ _mkl_squared_difference(x, y, mkl_x, mkl_y)
+
+Returns (x - y)(x - y) element-wise.
+"""
+begin
+ begin
+ function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "_MklSquaredDifference") do
+ desc = tf.NodeDescription("_MklSquaredDifference")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ mkl_x_ = convert(Tensor{UInt8}, mkl_x_)
+ begin
+ end
+ end
+ begin
+ mkl_y_ = convert(Tensor{UInt8}, mkl_y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ desc = tf.EagerOp("_MklSquaredDifference")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ mkl_x_ = convert(tf.EagerTensor, mkl_x_)
+ mkl_y_ = convert(tf.EagerTensor, mkl_y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, mkl_x_)
+ end
+ begin
+ tf.add_input(desc, mkl_y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing)
+ if tf.in_eager_mode()
+ _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name)
+ else
+ _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ conv3d_backprop_filter(input, filter, out_backprop; dilations=[1, 1, 1, 1, 1])
+
+Computes the gradients of 3-D convolution with respect to the filter.
+"""
+begin
+ begin
+ function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "Conv3DBackpropFilter") do
+ desc = tf.NodeDescription("Conv3DBackpropFilter")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_ = convert(Tensor{Any}, filter_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ desc = tf.EagerOp("Conv3DBackpropFilter")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_ = convert(tf.EagerTensor, filter_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(filter_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations)
+ else
+ conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations)
+ end
+ end
+ end
+end
+
+
+"""
+ if_(cond, input; output_shapes=Int64[])
+
+Applies then_branch(input) if cond is true, else else_branch(input).
+"""
+begin
+ begin
+ function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "If") do
+ desc = tf.NodeDescription("If")
+ begin
+ begin
+ cond_ = convert(Tensor{Any}, cond_)
+ begin
+ end
+ end
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ begin
+ (cond_,) = tf.tf_promote(cond_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, cond_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if then_branch !== nothing
+ desc["then_branch"] = Base.identity(then_branch)
+ end
+ end
+ begin
+ if else_branch !== nothing
+ desc["else_branch"] = Base.identity(else_branch)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function if__eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("If")
+ cond_ = convert(tf.EagerTensor, cond_)
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, cond_)
+ end
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if then_branch !== nothing
+ desc["then_branch"] = Base.identity(then_branch)
+ end
+ end
+ begin
+ if else_branch !== nothing
+ desc["else_branch"] = Base.identity(else_branch)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ begin
+ desc["Tcond"] = tf.data_type(cond_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes)
+ else
+ if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ flat_map_dataset(input_dataset, other_arguments)
+
+Creates a dataset that applies f to the outputs of input_dataset.
+"""
+begin
+ begin
+ function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "FlatMapDataset") do
+ desc = tf.NodeDescription("FlatMapDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("FlatMapDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ else
+ flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_scatter(tensor, indices, element_shape)
+
+Creates a TensorList by indexing into a Tensor.
+"""
+begin
+ begin
+ function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListScatter") do
+ desc = tf.NodeDescription("TensorListScatter")
+ begin
+ begin
+ tensor_ = convert(Tensor{Any}, tensor_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int32}, indices_)
+ begin
+ end
+ end
+ begin
+ element_shape_ = convert(Tensor{Any}, element_shape_)
+ begin
+ end
+ end
+ begin
+ (tensor_,) = tf.tf_promote(tensor_)
+ end
+ begin
+ (element_shape_,) = tf.tf_promote(element_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ desc = tf.EagerOp("TensorListScatter")
+ tensor_ = convert(tf.EagerTensor, tensor_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ element_shape_ = convert(tf.EagerTensor, element_shape_)
+ begin
+ begin
+ tf.add_input(desc, tensor_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, element_shape_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if shape_type !== nothing
+ desc["shape_type"] = Base.identity(shape_type)
+ end
+ end
+ end
+ begin
+ desc["element_dtype"] = tf.data_type(tensor_)
+ end
+ begin
+ desc["shape_type"] = tf.data_type(element_shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+ if tf.in_eager_mode()
+ tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ else
+ tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+ end
+ end
+ end
+end
+
+
+"""
+ softsign_grad(gradients, features)
+
+Computes softsign gradients for a softsign operation.
+"""
+begin
+ begin
+ function softsign_grad_graph(gradients_, features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SoftsignGrad") do
+ desc = tf.NodeDescription("SoftsignGrad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Any}, gradients_)
+ begin
+ end
+ end
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (gradients_, features_) = tf.tf_promote(gradients_, features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function softsign_grad_eager(gradients_, features_; name=nothing)
+ desc = tf.EagerOp("SoftsignGrad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign_grad(gradients_, features_; name=nothing)
+ if tf.in_eager_mode()
+ softsign_grad_eager(gradients_, features_; name=name)
+ else
+ softsign_grad_graph(gradients_, features_; name=name)
+ end
+ end
+ end
+end
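+
+# A minimal sketch, assuming the wrapper is reachable as `Ops.softsign_grad`.
+# Softsign is x / (1 + |x|), so the gradient is gradients ./ (1 .+ abs.(features)).^2:
+#
+#   g = constant([1.0, 1.0])
+#   f = constant([0.0, 3.0])
+#   Ops.softsign_grad(g, f)    # ≈ [1.0, 0.0625]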
+
+
+"""
+    copy_host(input; tensor_name="", debug_ops_spec=Int64[])
+
+Copy Host Op.
+"""
+begin
+ begin
+ function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+ local desc
+ tf.with_op_name(name, "CopyHost") do
+ desc = tf.NodeDescription("CopyHost")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_ops_spec !== nothing
+ desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+ desc = tf.EagerOp("CopyHost")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_ops_spec !== nothing
+ desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+ if tf.in_eager_mode()
+ copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec)
+ else
+ copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec)
+ end
+ end
+ end
+end
+
+
+"""
+ lin_space(start, stop, num)
+
+Generates values in an interval.
+"""
+begin
+ begin
+ function lin_space_graph(start_, stop_, num_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LinSpace") do
+ desc = tf.NodeDescription("LinSpace")
+ begin
+ begin
+ start_ = convert(Tensor{Any}, start_)
+ begin
+ end
+ end
+ begin
+ stop_ = convert(Tensor{Any}, stop_)
+ begin
+ end
+ end
+ begin
+ num_ = convert(Tensor{Int32}, num_)
+ begin
+ num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1)
+ end
+ end
+ begin
+ (start_, stop_) = tf.tf_promote(start_, stop_)
+ end
+ begin
+ (num_,) = tf.tf_promote(num_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, stop_)
+ end
+ begin
+ tf.add_input(desc, num_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function lin_space_eager(start_, stop_, num_; name=nothing)
+ desc = tf.EagerOp("LinSpace")
+ start_ = convert(tf.EagerTensor, start_)
+ stop_ = convert(tf.EagerTensor, stop_)
+ num_ = convert(tf.EagerTensor, num_)
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, stop_)
+ end
+ begin
+ tf.add_input(desc, num_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(start_)
+ end
+ begin
+ desc["T"] = tf.data_type(stop_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(num_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lin_space(start_, stop_, num_; name=nothing)
+ if tf.in_eager_mode()
+ lin_space_eager(start_, stop_, num_; name=name)
+ else
+ lin_space_graph(start_, stop_, num_; name=name)
+ end
+ end
+ end
+end
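+
+# A minimal sketch, assuming the wrapper is reachable as `Ops.lin_space`. Note that
+# the graph wrapper above decrements `num` like an index before the kernel sees it:
+#
+#   Ops.lin_space(constant(0.0), constant(1.0), constant(Int32(11)))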
+
+
+"""
+ _parallel_concat_update(value, update)
+
+Updates input `value` at `loc` with `update`.
+"""
+begin
+ begin
+ function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing)
+ local desc
+ tf.with_op_name(name, "_ParallelConcatUpdate") do
+ desc = tf.NodeDescription("_ParallelConcatUpdate")
+ begin
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ update_ = convert(Tensor{Any}, update_)
+ begin
+ end
+ end
+ begin
+ (value_, update_) = tf.tf_promote(value_, update_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, update_)
+ end
+ end
+ begin
+ begin
+ if loc !== nothing
+ desc["loc"] = Base.Int(loc)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing)
+ desc = tf.EagerOp("_ParallelConcatUpdate")
+ value_ = convert(tf.EagerTensor, value_)
+ update_ = convert(tf.EagerTensor, update_)
+ begin
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, update_)
+ end
+ end
+ begin
+ begin
+ if loc !== nothing
+ desc["loc"] = Base.Int(loc)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ begin
+ desc["T"] = tf.data_type(update_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_update(value_, update_; name=nothing, loc=nothing)
+ if tf.in_eager_mode()
+ _parallel_concat_update_eager(value_, update_; name=name, loc=loc)
+ else
+ _parallel_concat_update_graph(value_, update_; name=name, loc=loc)
+ end
+ end
+ end
+end
+
+
+"""
+    stack(; stack_name="")
+
+
+"""
+begin
+ begin
+ function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing)
+ local desc
+ tf.with_op_name(name, "Stack") do
+ desc = tf.NodeDescription("Stack")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ begin
+ if stack_name !== nothing
+ desc["stack_name"] = Base.String(stack_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_eager(; name=nothing, elem_type=nothing, stack_name=nothing)
+ desc = tf.EagerOp("Stack")
+ begin
+ end
+ begin
+ begin
+ if elem_type !== nothing
+ desc["elem_type"] = Base.identity(elem_type)
+ end
+ end
+ begin
+ if stack_name !== nothing
+ desc["stack_name"] = Base.String(stack_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack(; name=nothing, elem_type=nothing, stack_name=nothing)
+ if tf.in_eager_mode()
+ stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name)
+ else
+ stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name)
+ end
+ end
+ end
+end
+
+
+"""
+ stack_push_v2(handle, elem; swap_memory=false)
+
+Push an element onto the stack.
+"""
+begin
+ begin
+ function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing)
+ local desc
+ tf.with_op_name(name, "StackPushV2") do
+ desc = tf.NodeDescription("StackPushV2")
+ begin
+ begin
+ handle_ = convert(Tensor{Any}, handle_)
+ begin
+ end
+ end
+ begin
+ elem_ = convert(Tensor{Any}, elem_)
+ begin
+ end
+ end
+ begin
+ (elem_,) = tf.tf_promote(elem_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, elem_)
+ end
+ end
+ begin
+ begin
+ if swap_memory !== nothing
+ desc["swap_memory"] = Base.Bool(swap_memory)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing)
+ desc = tf.EagerOp("StackPushV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ elem_ = convert(tf.EagerTensor, elem_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, elem_)
+ end
+ end
+ begin
+ begin
+ if swap_memory !== nothing
+ desc["swap_memory"] = Base.Bool(swap_memory)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(elem_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing)
+ if tf.in_eager_mode()
+ stack_push_v2_eager(handle_, elem_; name=name, swap_memory=swap_memory)
+ else
+ stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory)
+ end
+ end
+ end
+end
+
+
+"""
+ assign_variable_op(resource, value)
+
+Assigns a new value to a variable.
+"""
+begin
+ begin
+ function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "AssignVariableOp") do
+ desc = tf.NodeDescription("AssignVariableOp")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("AssignVariableOp")
+ resource_ = convert(tf.EagerTensor, resource_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["dtype"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_variable_op(resource_, value_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ assign_variable_op_eager(resource_, value_; name=name, dtype=dtype)
+ else
+ assign_variable_op_graph(resource_, value_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_split(split_dim, indices, values, shape)
+
+Splits a SparseTensor into num_split tensors along one dimension.
+"""
+begin
+ begin
+ function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSplit") do
+ desc = tf.NodeDescription("SparseSplit")
+ begin
+ begin
+ split_dim_ = convert(Tensor{Int64}, split_dim_)
+ begin
+ split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1)
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Int64}, indices_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ shape_ = convert(Tensor{Int64}, shape_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, split_dim_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if num_split !== nothing
+ desc["num_split"] = Base.Int(num_split)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing)
+ desc = tf.EagerOp("SparseSplit")
+ split_dim_ = convert(tf.EagerTensor, split_dim_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, split_dim_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if num_split !== nothing
+ desc["num_split"] = Base.Int(num_split)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing)
+ if tf.in_eager_mode()
+ sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split)
+ else
+ sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_unpack(handle, value, flow_in)
+
+
+"""
+begin
+ begin
+ function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayUnpack") do
+ desc = tf.NodeDescription("TensorArrayUnpack")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ flow_in_ = convert(Tensor{Float32}, flow_in_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing)
+ desc = tf.EagerOp("TensorArrayUnpack")
+ handle_ = convert(tf.EagerTensor, handle_)
+ value_ = convert(tf.EagerTensor, value_)
+ flow_in_ = convert(tf.EagerTensor, flow_in_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ begin
+ tf.add_input(desc, flow_in_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_unpack(handle_, value_, flow_in_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_unpack_eager(handle_, value_, flow_in_; name=name)
+ else
+ tensor_array_unpack_graph(handle_, value_, flow_in_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_list_stack(input_handle; num_elements=-1)
+
+Stacks all tensors in the list.
+"""
+begin
+ begin
+ function tensor_list_stack_graph(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListStack") do
+ desc = tf.NodeDescription("TensorListStack")
+ begin
+ begin
+ input_handle_ = convert(Tensor{Any}, input_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if num_elements !== nothing
+ desc["num_elements"] = Base.Int(num_elements)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_stack_eager(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing)
+ desc = tf.EagerOp("TensorListStack")
+ input_handle_ = convert(tf.EagerTensor, input_handle_)
+ begin
+ begin
+ tf.add_input(desc, input_handle_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ begin
+ if num_elements !== nothing
+ desc["num_elements"] = Base.Int(num_elements)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_stack, [input_handle_], name=nothing, element_dtype=nothing, num_elements=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_stack(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing)
+ if tf.in_eager_mode()
+ tensor_list_stack_eager(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements)
+ else
+ tensor_list_stack_graph(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements)
+ end
+ end
+ end
+end
+
+
+"""
+ barrier_incomplete_size(handle)
+
+Computes the number of incomplete elements in the given barrier.
+"""
+begin
+ begin
+ function barrier_incomplete_size_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BarrierIncompleteSize") do
+ desc = tf.NodeDescription("BarrierIncompleteSize")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function barrier_incomplete_size_eager(handle_; name=nothing)
+ desc = tf.EagerOp("BarrierIncompleteSize")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_incomplete_size(handle_; name=nothing)
+ if tf.in_eager_mode()
+ barrier_incomplete_size_eager(handle_; name=name)
+ else
+ barrier_incomplete_size_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ restore(file_pattern, tensor_name; preferred_shard=-1)
+
+Restores a tensor from checkpoint files.
+"""
+begin
+ begin
+ function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing)
+ local desc
+ tf.with_op_name(name, "Restore") do
+ desc = tf.NodeDescription("Restore")
+ begin
+ begin
+ file_pattern_ = convert(Tensor{String}, file_pattern_)
+ begin
+ end
+ end
+ begin
+ tensor_name_ = convert(Tensor{String}, tensor_name_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, file_pattern_)
+ end
+ begin
+ tf.add_input(desc, tensor_name_)
+ end
+ end
+ begin
+ begin
+ if dt !== nothing
+ desc["dt"] = Base.identity(dt)
+ end
+ end
+ begin
+ if preferred_shard !== nothing
+ desc["preferred_shard"] = Base.Int(preferred_shard)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing)
+ desc = tf.EagerOp("Restore")
+ file_pattern_ = convert(tf.EagerTensor, file_pattern_)
+ tensor_name_ = convert(tf.EagerTensor, tensor_name_)
+ begin
+ begin
+ tf.add_input(desc, file_pattern_)
+ end
+ begin
+ tf.add_input(desc, tensor_name_)
+ end
+ end
+ begin
+ begin
+ if dt !== nothing
+ desc["dt"] = Base.identity(dt)
+ end
+ end
+ begin
+ if preferred_shard !== nothing
+ desc["preferred_shard"] = Base.Int(preferred_shard)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing)
+ if tf.in_eager_mode()
+ restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard)
+ else
+ restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard)
+ end
+ end
+ end
+end
+
+
+"""
+    tensor_array_v3(size; element_shape=?, dynamic_size=false, clear_after_read=true, identical_element_shapes=false, tensor_array_name="")
+
+An array of Tensors of given size.
+"""
+begin
+ begin
+ function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayV3") do
+ desc = tf.NodeDescription("TensorArrayV3")
+ begin
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ begin
+ if dynamic_size !== nothing
+ desc["dynamic_size"] = Base.Bool(dynamic_size)
+ end
+ end
+ begin
+ if clear_after_read !== nothing
+ desc["clear_after_read"] = Base.Bool(clear_after_read)
+ end
+ end
+ begin
+ if identical_element_shapes !== nothing
+ desc["identical_element_shapes"] = Base.Bool(identical_element_shapes)
+ end
+ end
+ begin
+ if tensor_array_name !== nothing
+ desc["tensor_array_name"] = Base.String(tensor_array_name)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing)
+ desc = tf.EagerOp("TensorArrayV3")
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ begin
+ if element_shape !== nothing
+ desc["element_shape"] = Base.identity(element_shape)
+ end
+ end
+ begin
+ if dynamic_size !== nothing
+ desc["dynamic_size"] = Base.Bool(dynamic_size)
+ end
+ end
+ begin
+ if clear_after_read !== nothing
+ desc["clear_after_read"] = Base.Bool(clear_after_read)
+ end
+ end
+ begin
+ if identical_element_shapes !== nothing
+ desc["identical_element_shapes"] = Base.Bool(identical_element_shapes)
+ end
+ end
+ begin
+ if tensor_array_name !== nothing
+ desc["tensor_array_name"] = Base.String(tensor_array_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name)
+ else
+ tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name)
+ end
+ end
+ end
+end
+
+
+"""
+ experimental_assert_next_dataset(input_dataset, transformations)
+
+
+"""
+begin
+ begin
+ function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalAssertNextDataset") do
+ desc = tf.NodeDescription("ExperimentalAssertNextDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ transformations_ = convert(Tensor{String}, transformations_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, transformations_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalAssertNextDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ transformations_ = convert(tf.EagerTensor, transformations_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, transformations_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ in_top_k(predictions, targets)
+
+Says whether the targets are in the top K predictions.
+"""
+begin
+ begin
+ function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing)
+ local desc
+ tf.with_op_name(name, "InTopK") do
+ desc = tf.NodeDescription("InTopK")
+ begin
+ begin
+ predictions_ = convert(Tensor{Float32}, predictions_)
+ begin
+ end
+ end
+ begin
+ targets_ = convert(Tensor{Int32}, targets_)
+ begin
+ end
+ end
+ begin
+ (targets_,) = tf.tf_promote(targets_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, predictions_)
+ end
+ begin
+ tf.add_input(desc, targets_)
+ end
+ end
+ begin
+ begin
+ if k !== nothing
+ desc["k"] = Base.Int(k)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing)
+ desc = tf.EagerOp("InTopK")
+ predictions_ = convert(tf.EagerTensor, predictions_)
+ targets_ = convert(tf.EagerTensor, targets_)
+ begin
+ begin
+ tf.add_input(desc, predictions_)
+ end
+ begin
+ tf.add_input(desc, targets_)
+ end
+ end
+ begin
+ begin
+ if k !== nothing
+ desc["k"] = Base.Int(k)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(targets_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing)
+ if tf.in_eager_mode()
+ in_top_k_eager(predictions_, targets_; name=name, k=k)
+ else
+ in_top_k_graph(predictions_, targets_; name=name, k=k)
+ end
+ end
+ end
+end
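+
+# A minimal sketch, assuming the wrapper is reachable as `Ops.in_top_k`. This raw
+# wrapper does not shift `targets`, so they are 0-based class ids as in the kernel:
+#
+#   preds   = constant(Float32[0.1 0.9; 0.8 0.2])    # 2 rows × 2 classes
+#   targets = constant(Int32[1, 0])                  # 0-based class ids
+#   Ops.in_top_k(preds, targets; k=1)                # → Bool[true, true]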
+
+
+"""
+ scatter_sub(ref, indices, updates; use_locking=false)
+
+Subtracts sparse updates to a variable reference.
+"""
+begin
+ begin
+ function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterSub") do
+ desc = tf.NodeDescription("ScatterSub")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterSub")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
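+
+# A minimal graph-mode sketch, assuming the wrapper is reachable as `Ops.scatter_sub`
+# and that a `Variable` converts to a mutable tensor reference. The wrapper subtracts
+# 1 from `indices`, so it expects 1-based indices:
+#
+#   v  = Variable(ones(4))
+#   op = Ops.scatter_sub(v, constant(Int32[1, 3]), constant([0.5, 0.5]))
+#   # after running `op` in a session, v holds [0.5, 1.0, 0.5, 1.0]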
+
+
+"""
+ acosh(x)
+
+Computes inverse hyperbolic cosine of x element-wise.
+"""
+begin
+ begin
+ function acosh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Acosh") do
+ desc = tf.NodeDescription("Acosh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function acosh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Acosh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(acosh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acosh(x_; name=nothing)
+ if tf.in_eager_mode()
+ acosh_eager(x_; name=name)
+ else
+ acosh_graph(x_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format="NHWC", dilations=[1, 1, 1, 1])
+
+Compute the gradient of depthwise convolution with respect to the filter.
+"""
+begin
+ begin
+ function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ local desc
+ tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do
+ desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ filter_sizes_ = convert(Tensor{Int32}, filter_sizes_)
+ begin
+ end
+ end
+ begin
+ out_backprop_ = convert(Tensor{Any}, out_backprop_)
+ begin
+ end
+ end
+ begin
+ (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_sizes_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter")
+ input_ = convert(tf.EagerTensor, input_)
+ filter_sizes_ = convert(tf.EagerTensor, filter_sizes_)
+ out_backprop_ = convert(tf.EagerTensor, out_backprop_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, filter_sizes_)
+ end
+ begin
+ tf.add_input(desc, out_backprop_)
+ end
+ end
+ begin
+ begin
+ if strides !== nothing
+ desc["strides"] = map(Base.identity, strides)
+ end
+ end
+ begin
+ if padding !== nothing
+ desc["padding"] = Base.String(padding)
+ end
+ end
+ begin
+ if data_format !== nothing
+ desc["data_format"] = Base.String(data_format)
+ end
+ end
+ begin
+ if dilations !== nothing
+ desc["dilations"] = map(Base.identity, dilations)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["T"] = tf.data_type(out_backprop_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+ if tf.in_eager_mode()
+ depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ else
+ depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+ end
+ end
+ end
+end
+
+
+"""
+    quantize_v2(input, min_range, max_range; mode="MIN_COMBINED", round_mode="HALF_AWAY_FROM_ZERO")
+
+Quantize the float tensor `input` into the op's quantized output type, also
+returning the actual min/max of the range used for quantization.
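+
+A graph-mode sketch (illustrative only; the defaults shown in the signature
+are the upstream TensorFlow defaults):
+
+```julia
+using TensorFlow
+sess = Session(Graph())
+x = placeholder(Float32, shape=[3])
+# Three outputs: the quantized tensor and the actual min/max used.
+out, out_min, out_max = Ops.quantize_v2(x, constant(-1f0), constant(1f0))
+```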
+"""
+begin
+ begin
+ function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizeV2") do
+ desc = tf.NodeDescription("QuantizeV2")
+ begin
+ begin
+ input_ = convert(Tensor{Float32}, input_)
+ begin
+ end
+ end
+ begin
+ min_range_ = convert(Tensor{Float32}, min_range_)
+ begin
+ end
+ end
+ begin
+ max_range_ = convert(Tensor{Float32}, max_range_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_range_)
+ end
+ begin
+ tf.add_input(desc, max_range_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ begin
+ if round_mode !== nothing
+ desc["round_mode"] = Base.String(round_mode)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing)
+ desc = tf.EagerOp("QuantizeV2")
+ input_ = convert(tf.EagerTensor, input_)
+ min_range_ = convert(tf.EagerTensor, min_range_)
+ max_range_ = convert(tf.EagerTensor, max_range_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_range_)
+ end
+ begin
+ tf.add_input(desc, max_range_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ begin
+ if round_mode !== nothing
+ desc["round_mode"] = Base.String(round_mode)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing)
+ if tf.in_eager_mode()
+ quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode)
+ else
+ quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode)
+ end
+ end
+ end
+end
+
+
+"""
+ cast(x; Truncate=false)
+
+Cast `x` from its source type `SrcT` (inferred from `x`) to the destination
+type `DstT`.
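+
+A minimal eager-mode sketch (illustrative only; passing a Julia type for the
+`DstT` attribute is assumed to map to the corresponding TensorFlow dtype):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+x = constant([1.7, 2.3])
+Ops.cast(x; DstT=Int32)  # SrcT is filled in from x's data type
+```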
+"""
+begin
+ begin
+ function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+ local desc
+ tf.with_op_name(name, "Cast") do
+ desc = tf.NodeDescription("Cast")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if SrcT !== nothing
+ desc["SrcT"] = Base.identity(SrcT)
+ end
+ end
+ begin
+ if DstT !== nothing
+ desc["DstT"] = Base.identity(DstT)
+ end
+ end
+ begin
+ if Truncate !== nothing
+ desc["Truncate"] = Base.Bool(Truncate)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+ desc = tf.EagerOp("Cast")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ begin
+ if SrcT !== nothing
+ desc["SrcT"] = Base.identity(SrcT)
+ end
+ end
+ begin
+ if DstT !== nothing
+ desc["DstT"] = Base.identity(DstT)
+ end
+ end
+ begin
+ if Truncate !== nothing
+ desc["Truncate"] = Base.Bool(Truncate)
+ end
+ end
+ end
+ begin
+ desc["SrcT"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+ if tf.in_eager_mode()
+ cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+ else
+ cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+ end
+ end
+ end
+end
+
+
+"""
+ generator_dataset(init_func_other_args, next_func_other_args, finalize_func_other_args)
+
+Create a dataset whose elements are produced by the supplied `init_func`,
+`next_func`, and `finalize_func` functions.
+"""
+begin
+ begin
+ function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "GeneratorDataset") do
+ desc = tf.NodeDescription("GeneratorDataset")
+ begin
+ begin
+ init_func_other_args_ = [convert(Tensor{Any}, x) for x = init_func_other_args_]
+ begin
+ end
+ end
+ begin
+ next_func_other_args_ = [convert(Tensor{Any}, x) for x = next_func_other_args_]
+ begin
+ end
+ end
+ begin
+ finalize_func_other_args_ = [convert(Tensor{Any}, x) for x = finalize_func_other_args_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, init_func_other_args_)
+ end
+ begin
+ tf.add_input(desc, next_func_other_args_)
+ end
+ begin
+ tf.add_input(desc, finalize_func_other_args_)
+ end
+ end
+ begin
+ begin
+ if init_func !== nothing
+ desc["init_func"] = Base.identity(init_func)
+ end
+ end
+ begin
+ if next_func !== nothing
+ desc["next_func"] = Base.identity(next_func)
+ end
+ end
+ begin
+ if finalize_func !== nothing
+ desc["finalize_func"] = Base.identity(finalize_func)
+ end
+ end
+ begin
+ if Tinit_func_args !== nothing
+ desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args)
+ end
+ end
+ begin
+ if Tnext_func_args !== nothing
+ desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args)
+ end
+ end
+ begin
+ if Tfinalize_func_args !== nothing
+ desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("GeneratorDataset")
+ init_func_other_args_ = convert(tf.EagerTensor, init_func_other_args_)
+ next_func_other_args_ = convert(tf.EagerTensor, next_func_other_args_)
+ finalize_func_other_args_ = convert(tf.EagerTensor, finalize_func_other_args_)
+ begin
+ begin
+ tf.add_input(desc, init_func_other_args_)
+ end
+ begin
+ tf.add_input(desc, next_func_other_args_)
+ end
+ begin
+ tf.add_input(desc, finalize_func_other_args_)
+ end
+ end
+ begin
+ begin
+ if init_func !== nothing
+ desc["init_func"] = Base.identity(init_func)
+ end
+ end
+ begin
+ if next_func !== nothing
+ desc["next_func"] = Base.identity(next_func)
+ end
+ end
+ begin
+ if finalize_func !== nothing
+ desc["finalize_func"] = Base.identity(finalize_func)
+ end
+ end
+ begin
+ if Tinit_func_args !== nothing
+ desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args)
+ end
+ end
+ begin
+ if Tnext_func_args !== nothing
+ desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args)
+ end
+ end
+ begin
+ if Tfinalize_func_args !== nothing
+ desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes)
+ else
+ generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_forest_tree_serialize(tree_handle)
+
+Serialize the tensor-forest decision tree referenced by `tree_handle` into a
+proto string.
+"""
+begin
+ begin
+ function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorForestTreeSerialize") do
+ desc = tf.NodeDescription("TensorForestTreeSerialize")
+ begin
+ begin
+ tree_handle_ = convert(Tensor{Any}, tree_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing)
+ desc = tf.EagerOp("TensorForestTreeSerialize")
+ tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+ begin
+ begin
+ tf.add_input(desc, tree_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_serialize(tree_handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_forest_tree_serialize_eager(tree_handle_; name=name)
+ else
+ tensor_forest_tree_serialize_graph(tree_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ tensor_array_close_v2(handle)
+
+Close the TensorArray identified by `handle`.
+"""
+begin
+ begin
+ function tensor_array_close_v2_graph(handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "TensorArrayCloseV2") do
+ desc = tf.NodeDescription("TensorArrayCloseV2")
+ begin
+ begin
+ handle_ = convert(Tensor{String}, handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_array_close_v2_eager(handle_; name=nothing)
+ desc = tf.EagerOp("TensorArrayCloseV2")
+ handle_ = convert(tf.EagerTensor, handle_)
+ begin
+ begin
+ tf.add_input(desc, handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v2(handle_; name=nothing)
+ if tf.in_eager_mode()
+ tensor_array_close_v2_eager(handle_; name=name)
+ else
+ tensor_array_close_v2_graph(handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+    big_query_reader(; container="", shared_name="", test_end_point="")
+
+A Reader that outputs rows from a BigQuery table as TensorFlow `Example`s.
+"""
+begin
+ begin
+ function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing)
+ local desc
+ tf.with_op_name(name, "BigQueryReader") do
+ desc = tf.NodeDescription("BigQueryReader")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if project_id !== nothing
+ desc["project_id"] = Base.String(project_id)
+ end
+ end
+ begin
+ if dataset_id !== nothing
+ desc["dataset_id"] = Base.String(dataset_id)
+ end
+ end
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.String(table_id)
+ end
+ end
+ begin
+ if columns !== nothing
+ desc["columns"] = map(Base.identity, columns)
+ end
+ end
+ begin
+ if timestamp_millis !== nothing
+ desc["timestamp_millis"] = Base.Int(timestamp_millis)
+ end
+ end
+ begin
+ if test_end_point !== nothing
+ desc["test_end_point"] = Base.String(test_end_point)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function big_query_reader_eager(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing)
+ desc = tf.EagerOp("BigQueryReader")
+ begin
+ end
+ begin
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ begin
+ if project_id !== nothing
+ desc["project_id"] = Base.String(project_id)
+ end
+ end
+ begin
+ if dataset_id !== nothing
+ desc["dataset_id"] = Base.String(dataset_id)
+ end
+ end
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.String(table_id)
+ end
+ end
+ begin
+ if columns !== nothing
+ desc["columns"] = map(Base.identity, columns)
+ end
+ end
+ begin
+ if timestamp_millis !== nothing
+ desc["timestamp_millis"] = Base.Int(timestamp_millis)
+ end
+ end
+ begin
+ if test_end_point !== nothing
+ desc["test_end_point"] = Base.String(test_end_point)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing)
+ if tf.in_eager_mode()
+ big_query_reader_eager(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point)
+ else
+ big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point)
+ end
+ end
+ end
+end
+
+
+"""
+ reader_read_v2(reader_handle, queue_handle)
+
+Return the next record (a key, value pair) produced by a Reader, dequeueing
+from `queue_handle` when the Reader needs a new input file.
+"""
+begin
+ begin
+ function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ReaderReadV2") do
+ desc = tf.NodeDescription("ReaderReadV2")
+ begin
+ begin
+ reader_handle_ = convert(Tensor{Any}, reader_handle_)
+ begin
+ end
+ end
+ begin
+ queue_handle_ = convert(Tensor{Any}, queue_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing)
+ desc = tf.EagerOp("ReaderReadV2")
+ reader_handle_ = convert(tf.EagerTensor, reader_handle_)
+ queue_handle_ = convert(tf.EagerTensor, queue_handle_)
+ begin
+ begin
+ tf.add_input(desc, reader_handle_)
+ end
+ begin
+ tf.add_input(desc, queue_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_v2(reader_handle_, queue_handle_; name=nothing)
+ if tf.in_eager_mode()
+ reader_read_v2_eager(reader_handle_, queue_handle_; name=name)
+ else
+ reader_read_v2_graph(reader_handle_, queue_handle_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ mod(x, y)
+
+Return the element-wise remainder of division, `x % y`.
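+
+A minimal eager-mode sketch (illustrative only):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+Ops.mod(constant(7), constant(3))  # 1
+```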
+"""
+begin
+ begin
+ function mod_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Mod") do
+ desc = tf.NodeDescription("Mod")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function mod_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("Mod")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(mod, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mod(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ mod_eager(x_, y_; name=name)
+ else
+ mod_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ add_v2(x, y)
+
+Return `x + y` element-wise.
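+
+A minimal eager-mode sketch (illustrative only):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+Ops.add_v2(constant([1.0, 2.0]), constant([3.0, 4.0]))  # [4.0, 6.0]
+```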
+"""
+begin
+ begin
+ function add_v2_graph(x_, y_; name=nothing)
+ local desc
+ tf.with_op_name(name, "AddV2") do
+ desc = tf.NodeDescription("AddV2")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ (x_, y_) = tf.tf_promote(x_, y_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function add_v2_eager(x_, y_; name=nothing)
+ desc = tf.EagerOp("AddV2")
+ x_ = convert(tf.EagerTensor, x_)
+ y_ = convert(tf.EagerTensor, y_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, y_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(add_v2, [x_, y_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_v2(x_, y_; name=nothing)
+ if tf.in_eager_mode()
+ add_v2_eager(x_, y_; name=name)
+ else
+ add_v2_graph(x_, y_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ stateless_random_normal(shape, seed; dtype=Float32)
+
+Output deterministic pseudorandom values from a standard normal distribution;
+the result is a pure function of `shape` and the two-element `seed`.
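+
+A minimal eager-mode sketch (illustrative only):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+shape = constant(Int32[2, 3])
+seed = constant(Int64[1, 2])             # same seed, same values
+Ops.stateless_random_normal(shape, seed) # deterministic 2x3 draw
+```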
+"""
+begin
+ begin
+ function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "StatelessRandomNormal") do
+ desc = tf.NodeDescription("StatelessRandomNormal")
+ begin
+ begin
+ shape_ = convert(Tensor{Int32}, shape_)
+ begin
+ end
+ end
+ begin
+ seed_ = convert(Tensor{Int64}, seed_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ begin
+ (seed_,) = tf.tf_promote(seed_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("StatelessRandomNormal")
+ shape_ = convert(tf.EagerTensor, shape_)
+ seed_ = convert(tf.EagerTensor, seed_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ begin
+ tf.add_input(desc, seed_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ begin
+ desc["Tseed"] = tf.data_type(seed_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype)
+ else
+ stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
+
+"""
+ strided_slice_assign(ref, begin, end, strides, value; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0)
+
+Assign `value` into the strided slice of `ref` selected by `begin`, `end`, and
+`strides`. Note that this wrapper shifts `begin`, `end`, `strides`, and the
+mask attributes down by one before dispatching to the underlying op.
+"""
+begin
+ begin
+ function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ local desc
+ tf.with_op_name(name, "StridedSliceAssign") do
+ desc = tf.NodeDescription("StridedSliceAssign")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ begin_ = convert(Tensor{Any}, begin_)
+ begin
+ begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
+ end
+ end
+ begin
+ end_ = convert(Tensor{Any}, end_)
+ begin
+ end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1)
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Any}, strides_)
+ begin
+ strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1)
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (ref_, value_) = tf.tf_promote(ref_, value_)
+ end
+ begin
+ (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ desc = tf.EagerOp("StridedSliceAssign")
+ ref_ = convert(tf.EagerTensor, ref_)
+ begin_ = convert(tf.EagerTensor, begin_)
+ end_ = convert(tf.EagerTensor, end_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Index"] = tf.data_type(begin_)
+ end
+ begin
+ desc["Index"] = tf.data_type(end_)
+ end
+ begin
+ desc["Index"] = tf.data_type(strides_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ if tf.in_eager_mode()
+ strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ else
+ strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ end
+ end
+ end
+end
+
+
+"""
+ scatter_min(ref, indices, updates; use_locking=false)
+
+Update `ref` at the rows selected by `indices` to the element-wise minimum of
+its current values and `updates`. As with `scatter_sub`, `indices` are 1-based
+on the Julia side.
+"""
+begin
+ begin
+ function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ScatterMin") do
+ desc = tf.NodeDescription("ScatterMin")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (ref_, updates_) = tf.tf_promote(ref_, updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ScatterMin")
+ ref_ = convert(tf.EagerTensor, ref_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(ref_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ else
+ scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_strided_slice_assign(ref, begin, end, strides, value; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0)
+
+Resource-variable variant of `strided_slice_assign`; the same one-based index
+shifting applies.
+"""
+begin
+ begin
+ function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceStridedSliceAssign") do
+ desc = tf.NodeDescription("ResourceStridedSliceAssign")
+ begin
+ begin
+ ref_ = convert(Tensor{Any}, ref_)
+ begin
+ end
+ end
+ begin
+ begin_ = convert(Tensor{Any}, begin_)
+ begin
+ begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
+ end
+ end
+ begin
+ end_ = convert(Tensor{Any}, end_)
+ begin
+ end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1)
+ end
+ end
+ begin
+ strides_ = convert(Tensor{Any}, strides_)
+ begin
+ strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1)
+ end
+ end
+ begin
+ value_ = convert(Tensor{Any}, value_)
+ begin
+ end
+ end
+ begin
+ (value_,) = tf.tf_promote(value_)
+ end
+ begin
+ (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ desc = tf.EagerOp("ResourceStridedSliceAssign")
+ ref_ = convert(tf.EagerTensor, ref_)
+ begin_ = convert(tf.EagerTensor, begin_)
+ end_ = convert(tf.EagerTensor, end_)
+ strides_ = convert(tf.EagerTensor, strides_)
+ value_ = convert(tf.EagerTensor, value_)
+ begin
+ begin
+ tf.add_input(desc, ref_)
+ end
+ begin
+ tf.add_input(desc, begin_)
+ end
+ begin
+ tf.add_input(desc, end_)
+ end
+ begin
+ tf.add_input(desc, strides_)
+ end
+ begin
+ tf.add_input(desc, value_)
+ end
+ end
+ begin
+ begin
+ if Index !== nothing
+ desc["Index"] = Base.identity(Index)
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ begin_mask = Base.Int(begin_mask) - 1
+ end
+ end
+ begin
+ if begin_mask !== nothing
+ desc["begin_mask"] = Base.Int(begin_mask)
+ end
+ end
+ begin
+ if end_mask !== nothing
+ end_mask = Base.Int(end_mask) - 1
+ end
+ end
+ begin
+ if end_mask !== nothing
+ desc["end_mask"] = Base.Int(end_mask)
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ ellipsis_mask = Base.Int(ellipsis_mask) - 1
+ end
+ end
+ begin
+ if ellipsis_mask !== nothing
+ desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ new_axis_mask = Base.Int(new_axis_mask) - 1
+ end
+ end
+ begin
+ if new_axis_mask !== nothing
+ desc["new_axis_mask"] = Base.Int(new_axis_mask)
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+ end
+ end
+ begin
+ if shrink_axis_mask !== nothing
+ desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+ end
+ end
+ end
+ begin
+ desc["Index"] = tf.data_type(begin_)
+ end
+ begin
+ desc["Index"] = tf.data_type(end_)
+ end
+ begin
+ desc["Index"] = tf.data_type(strides_)
+ end
+ begin
+ desc["T"] = tf.data_type(value_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+ if tf.in_eager_mode()
+ resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ else
+ resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)
+ end
+ end
+ end
+end
+
+
+"""
+ random_gamma_grad(alpha, sample)
+
+Compute the derivative of a Gamma random `sample` with respect to its shape
+parameter `alpha` (the implicit reparameterization gradient).
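+
+A minimal eager-mode sketch (illustrative only):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+alpha = constant([2.0])
+sample = constant([1.5])  # a draw from Gamma(alpha, 1)
+Ops.random_gamma_grad(alpha, sample)
+```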
+"""
+begin
+ begin
+ function random_gamma_grad_graph(alpha_, sample_; name=nothing)
+ local desc
+ tf.with_op_name(name, "RandomGammaGrad") do
+ desc = tf.NodeDescription("RandomGammaGrad")
+ begin
+ begin
+ alpha_ = convert(Tensor{Any}, alpha_)
+ begin
+ end
+ end
+ begin
+ sample_ = convert(Tensor{Any}, sample_)
+ begin
+ end
+ end
+ begin
+ (alpha_, sample_) = tf.tf_promote(alpha_, sample_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, sample_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_gamma_grad_eager(alpha_, sample_; name=nothing)
+ desc = tf.EagerOp("RandomGammaGrad")
+ alpha_ = convert(tf.EagerTensor, alpha_)
+ sample_ = convert(tf.EagerTensor, sample_)
+ begin
+ begin
+ tf.add_input(desc, alpha_)
+ end
+ begin
+ tf.add_input(desc, sample_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(alpha_)
+ end
+ begin
+ desc["T"] = tf.data_type(sample_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma_grad(alpha_, sample_; name=nothing)
+ if tf.in_eager_mode()
+ random_gamma_grad_eager(alpha_, sample_; name=name)
+ else
+ random_gamma_grad_graph(alpha_, sample_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_sparse_apply_keras_momentum(var, accum, lr, grad, indices, momentum; use_locking=false, use_nesterov=false)
+
+Update `var` and `accum` following the momentum scheme used by Keras, applying
+`grad` at the rows selected by `indices`.
+"""
+begin
+ begin
+ function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do
+ desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ResourceSparseApplyKerasMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
+ end
+ end
+end
+
+
+"""
+ boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle, epsilon, num_streams; max_elements=1099511627776)
+
+Create the quantile stream resource referenced by
+`quantile_stream_resource_handle`, with approximation error `epsilon` and
+`num_streams` parallel streams.
+"""
+begin
+ begin
+ function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do
+ desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource")
+ begin
+ begin
+ quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Float32}, epsilon_)
+ begin
+ end
+ end
+ begin
+ num_streams_ = convert(Tensor{Int64}, num_streams_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, num_streams_)
+ end
+ end
+ begin
+ begin
+ if max_elements !== nothing
+ desc["max_elements"] = Base.Int(max_elements)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing)
+ desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource")
+ quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ num_streams_ = convert(tf.EagerTensor, num_streams_)
+ begin
+ begin
+ tf.add_input(desc, quantile_stream_resource_handle_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, num_streams_)
+ end
+ end
+ begin
+ begin
+ if max_elements !== nothing
+ desc["max_elements"] = Base.Int(max_elements)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements)
+ else
+ boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements)
+ end
+ end
+ end
+end
+
+
+"""
+ quantized_relu6(features, min_features, max_features; out_type=Float32)
+
+Compute quantized rectified-linear 6, `min(max(features, 0), 6)`, returning
+the activations together with their output min/max range.
+"""
+begin
+ begin
+ function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "QuantizedRelu6") do
+ desc = tf.NodeDescription("QuantizedRelu6")
+ begin
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ min_features_ = convert(Tensor{Float32}, min_features_)
+ begin
+ end
+ end
+ begin
+ max_features_ = convert(Tensor{Float32}, max_features_)
+ begin
+ end
+ end
+ begin
+ (features_,) = tf.tf_promote(features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, min_features_)
+ end
+ begin
+ tf.add_input(desc, max_features_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("QuantizedRelu6")
+ features_ = convert(tf.EagerTensor, features_)
+ min_features_ = convert(tf.EagerTensor, min_features_)
+ max_features_ = convert(tf.EagerTensor, max_features_)
+ begin
+ begin
+ tf.add_input(desc, features_)
+ end
+ begin
+ tf.add_input(desc, min_features_)
+ end
+ begin
+ tf.add_input(desc, max_features_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type)
+ else
+ quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
+
+"""
+ sparse_sparse_maximum(a_indices, a_values, a_shape, b_indices, b_values, b_shape)
+
+Return the element-wise maximum of two `SparseTensor`s, each given as an
+(indices, values, shape) triple; the result is returned as indices and values.
+"""
+begin
+ begin
+ function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSparseMaximum") do
+ desc = tf.NodeDescription("SparseSparseMaximum")
+ begin
+ begin
+ a_indices_ = convert(Tensor{Int64}, a_indices_)
+ begin
+ end
+ end
+ begin
+ a_values_ = convert(Tensor{Any}, a_values_)
+ begin
+ end
+ end
+ begin
+ a_shape_ = convert(Tensor{Int64}, a_shape_)
+ begin
+ end
+ end
+ begin
+ b_indices_ = convert(Tensor{Int64}, b_indices_)
+ begin
+ end
+ end
+ begin
+ b_values_ = convert(Tensor{Any}, b_values_)
+ begin
+ end
+ end
+ begin
+ b_shape_ = convert(Tensor{Int64}, b_shape_)
+ begin
+ end
+ end
+ begin
+ (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, b_values_)
+ end
+ begin
+ tf.add_input(desc, b_shape_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing)
+ desc = tf.EagerOp("SparseSparseMaximum")
+ a_indices_ = convert(tf.EagerTensor, a_indices_)
+ a_values_ = convert(tf.EagerTensor, a_values_)
+ a_shape_ = convert(tf.EagerTensor, a_shape_)
+ b_indices_ = convert(tf.EagerTensor, b_indices_)
+ b_values_ = convert(tf.EagerTensor, b_values_)
+ b_shape_ = convert(tf.EagerTensor, b_shape_)
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_indices_)
+ end
+ begin
+ tf.add_input(desc, b_values_)
+ end
+ begin
+ tf.add_input(desc, b_shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_values_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name)
+ else
+ sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ batch_norm_with_global_normalization(t, m, v, beta, gamma)
+
+Legacy batch normalization of the 4-D tensor `t` using mean `m`, variance `v`,
+and the `beta` and `gamma` correction terms.
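+
+A minimal eager-mode sketch (illustrative only; depth is the last dimension,
+per the upstream op's convention):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+t = constant(randn(1, 1, 1, 4))
+m, v = constant(zeros(4)), constant(ones(4))
+beta, gamma = constant(zeros(4)), constant(ones(4))
+Ops.batch_norm_with_global_normalization(t, m, v, beta, gamma;
+    variance_epsilon=1e-5, scale_after_normalization=true)
+```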
+"""
+begin
+ begin
+ function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ local desc
+ tf.with_op_name(name, "BatchNormWithGlobalNormalization") do
+ desc = tf.NodeDescription("BatchNormWithGlobalNormalization")
+ begin
+ begin
+ t_ = convert(Tensor{Any}, t_)
+ begin
+ end
+ end
+ begin
+ m_ = convert(Tensor{Any}, m_)
+ begin
+ end
+ end
+ begin
+ v_ = convert(Tensor{Any}, v_)
+ begin
+ end
+ end
+ begin
+ beta_ = convert(Tensor{Any}, beta_)
+ begin
+ end
+ end
+ begin
+ gamma_ = convert(Tensor{Any}, gamma_)
+ begin
+ end
+ end
+ begin
+ (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, gamma_)
+ end
+ end
+ begin
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if scale_after_normalization !== nothing
+ desc["scale_after_normalization"] = Base.Bool(scale_after_normalization)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ desc = tf.EagerOp("BatchNormWithGlobalNormalization")
+ t_ = convert(tf.EagerTensor, t_)
+ m_ = convert(tf.EagerTensor, m_)
+ v_ = convert(tf.EagerTensor, v_)
+ beta_ = convert(tf.EagerTensor, beta_)
+ gamma_ = convert(tf.EagerTensor, gamma_)
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ begin
+ tf.add_input(desc, m_)
+ end
+ begin
+ tf.add_input(desc, v_)
+ end
+ begin
+ tf.add_input(desc, beta_)
+ end
+ begin
+ tf.add_input(desc, gamma_)
+ end
+ end
+ begin
+ begin
+ if variance_epsilon !== nothing
+ desc["variance_epsilon"] = Base.identity(variance_epsilon)
+ end
+ end
+ begin
+ if scale_after_normalization !== nothing
+ desc["scale_after_normalization"] = Base.Bool(scale_after_normalization)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(t_)
+ end
+ begin
+ desc["T"] = tf.data_type(m_)
+ end
+ begin
+ desc["T"] = tf.data_type(v_)
+ end
+ begin
+ desc["T"] = tf.data_type(beta_)
+ end
+ begin
+ desc["T"] = tf.data_type(gamma_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
+ if tf.in_eager_mode()
+ batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
+ else
+ batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
+ end
+ end
+ end
+end
+
+
+"""
+ in_top_kv2(predictions, targets, k)
+
+Say whether each row of `predictions` has its target class among the top `k`
+entries. Note that `targets` are passed through unshifted, so they are
+interpreted as 0-based class indices.
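+
+A minimal eager-mode sketch (illustrative only):
+
+```julia
+using TensorFlow
+enable_eager_execution()
+predictions = constant(Float32[0.1 0.8 0.1; 0.3 0.3 0.4])
+targets = constant(Int32[1, 2])  # 0-based: class 1 of row 1, class 2 of row 2
+Ops.in_top_kv2(predictions, targets, constant(Int32(1)))  # [true, true]
+```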
+"""
+begin
+ begin
+ function in_top_kv2_graph(predictions_, targets_, k_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InTopKV2") do
+ desc = tf.NodeDescription("InTopKV2")
+ begin
+ begin
+ predictions_ = convert(Tensor{Float32}, predictions_)
+ begin
+ end
+ end
+ begin
+ targets_ = convert(Tensor{Int32}, targets_)
+ begin
+ end
+ end
+ begin
+ k_ = convert(Tensor{Int32}, k_)
+ begin
+ end
+ end
+ begin
+ (targets_, k_) = tf.tf_promote(targets_, k_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, predictions_)
+ end
+ begin
+ tf.add_input(desc, targets_)
+ end
+ begin
+ tf.add_input(desc, k_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function in_top_kv2_eager(predictions_, targets_, k_; name=nothing)
+ desc = tf.EagerOp("InTopKV2")
+ predictions_ = convert(tf.EagerTensor, predictions_)
+ targets_ = convert(tf.EagerTensor, targets_)
+ k_ = convert(tf.EagerTensor, k_)
+ begin
+ begin
+ tf.add_input(desc, predictions_)
+ end
+ begin
+ tf.add_input(desc, targets_)
+ end
+ begin
+ tf.add_input(desc, k_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(targets_)
+ end
+ begin
+ desc["T"] = tf.data_type(k_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(in_top_kv2, [predictions_, targets_, k_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_kv2(predictions_, targets_, k_; name=nothing)
+ if tf.in_eager_mode()
+ in_top_kv2_eager(predictions_, targets_, k_; name=name)
+ else
+ in_top_kv2_graph(predictions_, targets_, k_; name=name)
+ end
+ end
+ end
+end
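
For orientation on the op itself: `InTopKV2` reports, row by row, whether the true class is among the `k` highest-scoring predictions. A plain-Julia reference of the same semantics (an illustrative sketch, not the kernel TensorFlow actually runs):

    # predictions: batch × num_classes scores; targets: true class per row.
    function in_top_k_reference(predictions::Matrix{Float64},
                                targets::Vector{Int}, k::Int)
        map(1:size(predictions, 1)) do i
            row = predictions[i, :]
            # Row i passes when fewer than k classes score strictly
            # higher than the target class.
            count(v -> v > row[targets[i]], row) < k
        end
    end

    preds = [0.1 0.8 0.1;
             0.6 0.3 0.1]
    in_top_k_reference(preds, [2, 3], 2)   # Bool[true, false]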
+
+
+"""
+ cholesky(input)
+
+
+"""
+begin
+ begin
+ function cholesky_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Cholesky") do
+ desc = tf.NodeDescription("Cholesky")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function cholesky_eager(input_; name=nothing)
+ desc = tf.EagerOp("Cholesky")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(cholesky, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky(input_; name=nothing)
+ if tf.in_eager_mode()
+ cholesky_eager(input_; name=name)
+ else
+ cholesky_graph(input_; name=name)
+ end
+ end
+ end
+end
+
+
+"""
+ resource_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false)
+
+
+"""
+begin
+ begin
+ function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do
+ desc = tf.NodeDescription("ResourceApplyCenteredRMSProp")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ mg_ = convert(Tensor{Any}, mg_)
+ begin
+ end
+ end
+ begin
+ ms_ = convert(Tensor{Any}, ms_)
+ begin
+ end
+ end
+ begin
+ mom_ = convert(Tensor{Any}, mom_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ rho_ = convert(Tensor{Any}, rho_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ epsilon_ = convert(Tensor{Any}, epsilon_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("ResourceApplyCenteredRMSProp")
+ var_ = convert(tf.EagerTensor, var_)
+ mg_ = convert(tf.EagerTensor, mg_)
+ ms_ = convert(tf.EagerTensor, ms_)
+ mom_ = convert(tf.EagerTensor, mom_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ rho_ = convert(tf.EagerTensor, rho_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ epsilon_ = convert(tf.EagerTensor, epsilon_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, mg_)
+ end
+ begin
+ tf.add_input(desc, ms_)
+ end
+ begin
+ tf.add_input(desc, mom_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, rho_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ begin
+ tf.add_input(desc, epsilon_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(rho_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ begin
+ desc["T"] = tf.data_type(epsilon_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ else
+ resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
+ end
+ end
+ end
+end
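
The kernel this wraps applies the centered RMSProp rule: it tracks a running mean of gradients (`mg`) and of squared gradients (`ms`), and divides each step by an estimate of the gradient's standard deviation. A plain-array sketch of one update under the usual formulation (illustrative only; the real op mutates resource variables in place):

    # One centered-RMSProp step on plain Julia arrays.
    function centered_rmsprop_step!(var, mg, ms, mom, grad;
                                    lr=1e-3, rho=0.9, momentum=0.0,
                                    epsilon=1e-10)
        @. mg = rho * mg + (1 - rho) * grad        # mean gradient
        @. ms = rho * ms + (1 - rho) * grad^2      # mean squared gradient
        # ms - mg^2 estimates the gradient variance, hence "centered".
        @. mom = momentum * mom + lr * grad / sqrt(ms - mg^2 + epsilon)
        @. var -= mom
        return var
    end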
+
+
+"""
+ resource_apply_adagrad(var, accum, lr, grad; use_locking=false, update_slots=true)
+
+
+"""
+begin
+ begin
+ function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceApplyAdagrad") do
+ desc = tf.NodeDescription("ResourceApplyAdagrad")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ (lr_, grad_) = tf.tf_promote(lr_, grad_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+ desc = tf.EagerOp("ResourceApplyAdagrad")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if update_slots !== nothing
+ desc["update_slots"] = Base.Bool(update_slots)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+ if tf.in_eager_mode()
+ resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots)
+ else
+ resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots)
+ end
+ end
+ end
+end
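
`ResourceApplyAdagrad` is the standard Adagrad rule, with `update_slots=false` skipping the accumulator update. The arithmetic, as a plain-array sketch:

    # One Adagrad step: the accumulated squared gradients shrink the
    # effective learning rate on frequently-updated coordinates.
    function adagrad_step!(var, accum, grad; lr=1e-2, update_slots=true)
        if update_slots
            @. accum += grad^2
        end
        @. var -= lr * grad / sqrt(accum)
        return var
    end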
+
+
+"""
+ experimental_parallel_interleave_dataset(input_dataset, other_arguments, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)
+
+
+"""
+begin
+ begin
+ function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do
+ desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset")
+ begin
+ begin
+ input_dataset_ = convert(Tensor{Any}, input_dataset_)
+ begin
+ end
+ end
+ begin
+ other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+ begin
+ end
+ end
+ begin
+ cycle_length_ = convert(Tensor{Int64}, cycle_length_)
+ begin
+ end
+ end
+ begin
+ block_length_ = convert(Tensor{Int64}, block_length_)
+ begin
+ end
+ end
+ begin
+ sloppy_ = convert(Tensor{Bool}, sloppy_)
+ begin
+ end
+ end
+ begin
+ buffer_output_elements_ = convert(Tensor{Int64}, buffer_output_elements_)
+ begin
+ end
+ end
+ begin
+ prefetch_input_elements_ = convert(Tensor{Int64}, prefetch_input_elements_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, cycle_length_)
+ end
+ begin
+ tf.add_input(desc, block_length_)
+ end
+ begin
+ tf.add_input(desc, sloppy_)
+ end
+ begin
+ tf.add_input(desc, buffer_output_elements_)
+ end
+ begin
+ tf.add_input(desc, prefetch_input_elements_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("ExperimentalParallelInterleaveDataset")
+ input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+ other_arguments_ = convert(tf.EagerTensor, other_arguments_)
+ cycle_length_ = convert(tf.EagerTensor, cycle_length_)
+ block_length_ = convert(tf.EagerTensor, block_length_)
+ sloppy_ = convert(tf.EagerTensor, sloppy_)
+ buffer_output_elements_ = convert(tf.EagerTensor, buffer_output_elements_)
+ prefetch_input_elements_ = convert(tf.EagerTensor, prefetch_input_elements_)
+ begin
+ begin
+ tf.add_input(desc, input_dataset_)
+ end
+ begin
+ tf.add_input(desc, other_arguments_)
+ end
+ begin
+ tf.add_input(desc, cycle_length_)
+ end
+ begin
+ tf.add_input(desc, block_length_)
+ end
+ begin
+ tf.add_input(desc, sloppy_)
+ end
+ begin
+ tf.add_input(desc, buffer_output_elements_)
+ end
+ begin
+ tf.add_input(desc, prefetch_input_elements_)
+ end
+ end
+ begin
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ begin
+ if Targuments !== nothing
+ desc["Targuments"] = map(Base.identity, Targuments)
+ end
+ end
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ else
+ experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
end
+end
+
"""
- sign(x)
+ resize_bicubic_grad(grads, original_image; align_corners=false)
"""
-tf.@op function sign(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Sign")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Sign")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeBicubicGrad") do
+ desc = tf.NodeDescription("ResizeBicubicGrad")
+ begin
+ begin
+ grads_ = convert(Tensor{Float32}, grads_)
+ begin
+ end
+ end
+ begin
+ original_image_ = convert(Tensor{Any}, original_image_)
+ begin
+ end
+ end
+ begin
+ (original_image_,) = tf.tf_promote(original_image_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, original_image_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeBicubicGrad")
+ grads_ = convert(tf.EagerTensor, grads_)
+ original_image_ = convert(tf.EagerTensor, original_image_)
+ begin
+ begin
+ tf.add_input(desc, grads_)
+ end
+ begin
+ tf.add_input(desc, original_image_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(original_image_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners)
+ else
+ resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners)
+ end
+ end
+ end
+end
+
"""
- conj(input)
+ batch_self_adjoint_eig(input)
"""
-tf.@op function conj(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Conj")
- input_ = convert(TensorFlow.Tensor{Complex{Float32}}, input_)
- (input_,) = tf.tf_promote(input_)
+begin
+ begin
+ function batch_self_adjoint_eig_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchSelfAdjointEig") do
+ desc = tf.NodeDescription("BatchSelfAdjointEig")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function batch_self_adjoint_eig_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchSelfAdjointEig")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
tf.add_input(desc, input_)
- end), name, "Conj")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_self_adjoint_eig, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- round(x)
-
-
-"""
-tf.@op function round(x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Round")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- end), name, "Round")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_self_adjoint_eig_eager(input_; name=name)
+ else
+ batch_self_adjoint_eig_graph(input_; name=name)
+ end
+ end
end
+end
+
"""
- polygamma(a, x)
+ sparse_softmax(sp_indices, sp_values, sp_shape)
"""
-tf.@op function polygamma(a_, x_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Polygamma")
- a_ = convert(TensorFlow.Tensor{Any}, a_)
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (a_, x_) = tf.tf_promote(a_, x_)
- tf.add_input(desc, a_)
- tf.add_input(desc, x_)
- end), name, "Polygamma")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseSoftmax") do
+ desc = tf.NodeDescription("SparseSoftmax")
+ begin
+ begin
+ sp_indices_ = convert(Tensor{Int64}, sp_indices_)
+ begin
+ end
+ end
+ begin
+ sp_values_ = convert(Tensor{Any}, sp_values_)
+ begin
+ end
+ end
+ begin
+ sp_shape_ = convert(Tensor{Int64}, sp_shape_)
+ begin
+ end
+ end
+ begin
+ (sp_values_,) = tf.tf_promote(sp_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing)
+ desc = tf.EagerOp("SparseSoftmax")
+ sp_indices_ = convert(tf.EagerTensor, sp_indices_)
+ sp_values_ = convert(tf.EagerTensor, sp_values_)
+ sp_shape_ = convert(tf.EagerTensor, sp_shape_)
+ begin
+ begin
+ tf.add_input(desc, sp_indices_)
+ end
+ begin
+ tf.add_input(desc, sp_values_)
+ end
+ begin
+ tf.add_input(desc, sp_shape_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(sp_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_softmax, [sp_indices_, sp_values_, sp_shape_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name)
+ else
+ sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=name)
+ end
+ end
+ end
+end
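
`SparseSoftmax` normalizes only the explicitly stored values within each row of a `SparseTensor`; implicit zeros stay implicit rather than contributing `exp(0)` terms. The per-row computation on the stored values, sketched in plain Julia (the real op works on the COO triplet `sp_indices`/`sp_values`/`sp_shape`):

    # Softmax over just the stored entries of one sparse row.
    function sparse_row_softmax(values::Vector{Float64})
        shifted = values .- maximum(values)  # max-shift for numerical stability
        e = exp.(shifted)
        e ./ sum(e)
    end

    sparse_row_softmax([1.0, 2.0, 3.0])  # sums to 1 over the stored entries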
+
"""
- zeta(x, q)
+ asinh(x)
"""
-tf.@op function zeta(x_, q_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Zeta")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- q_ = convert(TensorFlow.Tensor{Any}, q_)
- (x_, q_) = tf.tf_promote(x_, q_)
+begin
+ begin
+ function asinh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Asinh") do
+ desc = tf.NodeDescription("Asinh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function asinh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Asinh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
tf.add_input(desc, x_)
- tf.add_input(desc, q_)
- end), name, "Zeta")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(asinh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asinh(x_; name=nothing)
+ if tf.in_eager_mode()
+ asinh_eager(x_; name=name)
+ else
+ asinh_graph(x_; name=name)
+ end
+ end
end
+end
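
`Asinh` is the elementwise inverse hyperbolic sine, `asinh(x) = log(x + sqrt(x^2 + 1))`; Julia's own `asinh` matches it, which makes a quick sanity check cheap:

    x = 0.75
    asinh(x) ≈ log(x + sqrt(x^2 + 1))   # true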
+
"""
matrix_inverse(input; adjoint=false)
"""
-tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatrixInverse")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
+begin
+ begin
+ function matrix_inverse_graph(input_; name=nothing, adjoint=nothing)
+ local desc
+ tf.with_op_name(name, "MatrixInverse") do
+ desc = tf.NodeDescription("MatrixInverse")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if adjoint !== nothing
+ desc["adjoint"] = Base.Bool(adjoint)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function matrix_inverse_eager(input_; name=nothing, adjoint=nothing)
+ desc = tf.EagerOp("MatrixInverse")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
if adjoint !== nothing
desc["adjoint"] = Base.Bool(adjoint)
end
- end), name, "MatrixInverse")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- matrix_determinant(input)
-
-
-"""
-tf.@op function matrix_determinant(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatrixDeterminant")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- end), name, "MatrixDeterminant")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing)
+ if tf.in_eager_mode()
+ matrix_inverse_eager(input_; name=name, adjoint=adjoint)
+ else
+ matrix_inverse_graph(input_; name=name, adjoint=adjoint)
+ end
+ end
end
+end
-"""
- diag(diagonal)
-
-
-"""
-tf.@op function diag(diagonal_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Diag")
- diagonal_ = convert(TensorFlow.Tensor{Any}, diagonal_)
- (diagonal_,) = tf.tf_promote(diagonal_)
- tf.add_input(desc, diagonal_)
- end), name, "Diag")
- tf.Tensor(tf.Operation(desc))
- end
"""
- matrix_diag_part(input)
+ tensor_list_concat_lists(input_a, input_b)
"""
-tf.@op function matrix_diag_part(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("MatrixDiagPart")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- end), name, "MatrixDiagPart")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing)
+ local desc
+ tf.with_op_name(name, "TensorListConcatLists") do
+ desc = tf.NodeDescription("TensorListConcatLists")
+ begin
+ begin
+ input_a_ = convert(Tensor{Any}, input_a_)
+ begin
+ end
+ end
+ begin
+ input_b_ = convert(Tensor{Any}, input_b_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_a_)
+ end
+ begin
+ tf.add_input(desc, input_b_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing)
+ desc = tf.EagerOp("TensorListConcatLists")
+ input_a_ = convert(tf.EagerTensor, input_a_)
+ input_b_ = convert(tf.EagerTensor, input_b_)
+ begin
+ begin
+ tf.add_input(desc, input_a_)
+ end
+ begin
+ tf.add_input(desc, input_b_)
+ end
+ end
+ begin
+ begin
+ if element_dtype !== nothing
+ desc["element_dtype"] = Base.identity(element_dtype)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing)
+ if tf.in_eager_mode()
+ tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype)
+ else
+ tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype)
+ end
+ end
end
+end
+
"""
- cast(x)
+ requantize(input, input_min, input_max, requested_output_min, requested_output_max)
"""
-tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Cast")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- if SrcT !== nothing
- desc["SrcT"] = Base.identity(SrcT)
+begin
+ begin
+ function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing)
+ local desc
+ tf.with_op_name(name, "Requantize") do
+ desc = tf.NodeDescription("Requantize")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
end
- if DstT !== nothing
- desc["DstT"] = Base.identity(DstT)
+ begin
+ input_min_ = convert(Tensor{Float32}, input_min_)
+ begin
+ end
+ end
+ begin
+ input_max_ = convert(Tensor{Float32}, input_max_)
+ begin
+ end
+ end
+ begin
+ requested_output_min_ = convert(Tensor{Float32}, requested_output_min_)
+ begin
+ end
+ end
+ begin
+ requested_output_max_ = convert(Tensor{Float32}, requested_output_max_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ begin
+ tf.add_input(desc, requested_output_min_)
+ end
+ begin
+ tf.add_input(desc, requested_output_max_)
end
- end), name, "Cast")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing)
+ desc = tf.EagerOp("Requantize")
+ input_ = convert(tf.EagerTensor, input_)
+ input_min_ = convert(tf.EagerTensor, input_min_)
+ input_max_ = convert(tf.EagerTensor, input_max_)
+ requested_output_min_ = convert(tf.EagerTensor, requested_output_min_)
+ requested_output_max_ = convert(tf.EagerTensor, requested_output_max_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, input_min_)
+ end
+ begin
+ tf.add_input(desc, input_max_)
+ end
+ begin
+ tf.add_input(desc, requested_output_min_)
+ end
+ begin
+ tf.add_input(desc, requested_output_max_)
+ end
+ end
+ begin
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ end
+ begin
+ desc["Tinput"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing)
+ if tf.in_eager_mode()
+ requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type)
+ else
+ requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type)
+ end
+ end
+ end
+end
+
"""
- one_hot(indices, depth, on_value, off_value; axis=-1)
+ fft(input)
"""
-tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("OneHot")
- indices_ = convert(TensorFlow.Tensor{Int64}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- depth_ = convert(TensorFlow.Tensor{Int32}, depth_)
- on_value_ = convert(TensorFlow.Tensor{Any}, on_value_)
- off_value_ = convert(TensorFlow.Tensor{Any}, off_value_)
- (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, depth_)
- tf.add_input(desc, on_value_)
- tf.add_input(desc, off_value_)
- if axis !== nothing
- axis = Base.Int(axis) - 1
+begin
+ begin
+ function fft_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FFT") do
+ desc = tf.NodeDescription("FFT")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
end
- if axis !== nothing
- desc["axis"] = Base.Int(axis)
+ begin
+ (input_,) = tf.tf_promote(input_)
end
- end), name, "OneHot")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- reshape(tensor, shape)
-
-
-"""
-tf.@op function reshape(tensor_, shape_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Reshape")
- tensor_ = convert(TensorFlow.Tensor{Any}, tensor_)
- shape_ = convert(TensorFlow.Tensor{Int32}, shape_)
- (tensor_,) = tf.tf_promote(tensor_)
- (shape_,) = tf.tf_promote(shape_)
- tf.add_input(desc, tensor_)
- tf.add_input(desc, shape_)
- end), name, "Reshape")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function fft_eager(input_; name=nothing)
+ desc = tf.EagerOp("FFT")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tcomplex"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(fft, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft(input_; name=nothing)
+ if tf.in_eager_mode()
+ fft_eager(input_; name=name)
+ else
+ fft_graph(input_; name=name)
+ end
+ end
+ end
+end
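
Note that `fft_graph` converts its input to `Tensor{Complex{Float32}}`, and the eager path records the dtype under the `Tcomplex` attribute rather than the usual `T`. A small end-to-end check, assuming complex constants are accepted by `constant` as that conversion suggests:

    using TensorFlow
    const tf = TensorFlow
    sess = tf.Session(tf.Graph())
    x = constant(ComplexF32[1, 0, 0, 0])   # a unit impulse
    run(sess, tf.Ops.fft(x))               # → flat spectrum: four 1.0+0.0im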
+
"""
- split(split_dim, value)
+ conjugate_transpose(x, perm)
"""
-tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Split")
- split_dim_ = convert(TensorFlow.Tensor{Int32}, split_dim_)
- split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1)
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (value_,) = tf.tf_promote(value_)
- tf.add_input(desc, split_dim_)
- tf.add_input(desc, value_)
- if num_split !== nothing
- desc["num_split"] = Base.Int(num_split)
+begin
+ begin
+ function conjugate_transpose_graph(x_, perm_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ConjugateTranspose") do
+ desc = tf.NodeDescription("ConjugateTranspose")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
end
- end), name, "Split")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:num_split
- push!(out, tf.Tensor(op, out_idx))
+ begin
+ perm_ = convert(Tensor{Int32}, perm_)
+ begin
+ end
+ end
+ begin
+ (perm_,) = tf.tf_promote(perm_)
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, perm_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
end
- out
end
-
-"""
- div(x, y)
-
-
-"""
-tf.@op function div(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Div")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
+ begin
+ function conjugate_transpose_eager(x_, perm_; name=nothing)
+ desc = tf.EagerOp("ConjugateTranspose")
+ x_ = convert(tf.EagerTensor, x_)
+ perm_ = convert(tf.EagerTensor, perm_)
+ begin
+ begin
tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Div")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ tf.add_input(desc, perm_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["Tperm"] = tf.data_type(perm_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
-
-"""
- minimum(x, y)
-
-
-"""
-tf.@op function minimum(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Minimum")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Minimum")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conjugate_transpose(x_, perm_; name=nothing)
+ if tf.in_eager_mode()
+ conjugate_transpose_eager(x_, perm_; name=name)
+ else
+ conjugate_transpose_graph(x_, perm_; name=name)
+ end
+ end
end
+end
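
`ConjugateTranspose` is a transpose fused with elementwise complex conjugation: permute the axes by `perm`, then conjugate. As a mental model in plain Julia:

    x = ComplexF64[1+2im 3-1im;
                   0+1im 2+0im]
    conj.(permutedims(x, (2, 1)))   # what conjugate_transpose(x, perm) computes
    # For 2-D arrays this coincides with the adjoint, x'.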
-"""
- maximum(x, y)
-
-
-"""
-tf.@op function maximum(x_, y_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Maximum")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- y_ = convert(TensorFlow.Tensor{Any}, y_)
- (x_, y_) = tf.tf_promote(x_, y_)
- tf.add_input(desc, x_)
- tf.add_input(desc, y_)
- end), name, "Maximum")
- tf.Tensor(tf.Operation(desc))
- end
"""
- select(condition, t, e)
+ unstage(; capacity=0, memory_limit=0, container="", shared_name="")
"""
-tf.@op function select(condition_, t_, e_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Select")
- condition_ = convert(TensorFlow.Tensor{Bool}, condition_)
- t_ = convert(TensorFlow.Tensor{Any}, t_)
- e_ = convert(TensorFlow.Tensor{Any}, e_)
- (t_, e_) = tf.tf_promote(t_, e_)
- tf.add_input(desc, condition_)
- tf.add_input(desc, t_)
- tf.add_input(desc, e_)
- end), name, "Select")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ local desc
+ tf.with_op_name(name, "Unstage") do
+ desc = tf.NodeDescription("Unstage")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- switch(data, pred)
-
-
-"""
-tf.@op function switch(data_, pred_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Switch")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- pred_ = convert(TensorFlow.Tensor{Bool}, pred_)
- (data_,) = tf.tf_promote(data_)
- tf.add_input(desc, data_)
- tf.add_input(desc, pred_)
- end), name, "Switch")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+ begin
+ function unstage_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ desc = tf.EagerOp("Unstage")
+ begin
+ end
+ begin
+ begin
+ if capacity !== nothing
+ desc["capacity"] = Base.Int(capacity)
+ end
+ end
+ begin
+ if memory_limit !== nothing
+ desc["memory_limit"] = Base.Int(memory_limit)
+ end
+ end
+ begin
+ if dtypes !== nothing
+ desc["dtypes"] = map(Base.identity, dtypes)
+ end
+ end
+ begin
+ if container !== nothing
+ desc["container"] = Base.String(container)
+ end
+ end
+ begin
+ if shared_name !== nothing
+ desc["shared_name"] = Base.String(shared_name)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
-
-"""
- identity(input)
-
-
-"""
-tf.@op function identity(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Identity")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- end), name, "Identity")
- tf.Tensor(tf.Operation(desc))
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+ if tf.in_eager_mode()
+ unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ else
+ unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+ end
+ end
end
+end
+
"""
- merge(inputs)
+ relu6grad(gradients, features)
"""
-tf.@op function merge(inputs_; name=nothing, N=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Merge")
- inputs_ = [convert(TensorFlow.Tensor{Any}, x) for x = inputs_]
- (inputs_,) = tf.tf_promote(inputs_)
- tf.add_input(desc, inputs_)
- if N !== nothing
- desc["N"] = Base.Int(N)
+begin
+ begin
+ function relu6grad_graph(gradients_, features_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Relu6Grad") do
+ desc = tf.NodeDescription("Relu6Grad")
+ begin
+ begin
+ gradients_ = convert(Tensor{Any}, gradients_)
+ begin
+ end
+ end
+ begin
+ features_ = convert(Tensor{Any}, features_)
+ begin
+ end
+ end
+ begin
+ (gradients_, features_) = tf.tf_promote(gradients_, features_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, gradients_)
end
- end), name, "Merge")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function relu6grad_eager(gradients_, features_; name=nothing)
+ desc = tf.EagerOp("Relu6Grad")
+ gradients_ = convert(tf.EagerTensor, gradients_)
+ features_ = convert(tf.EagerTensor, features_)
+ begin
+ begin
+ tf.add_input(desc, gradients_)
+ end
+ begin
+ tf.add_input(desc, features_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(gradients_)
+ end
+ begin
+ desc["T"] = tf.data_type(features_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6grad(gradients_, features_; name=nothing)
+ if tf.in_eager_mode()
+ relu6grad_eager(gradients_, features_; name=name)
+ else
+ relu6grad_graph(gradients_, features_; name=name)
+ end
+ end
+ end
+end
-"""
- enter(data; is_constant=false, parallel_iterations=10)
+"""
+ _array_to_list(input)
+Converts an array of tensors to a list of tensors.
"""
-tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Enter")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- (data_,) = tf.tf_promote(data_)
- tf.add_input(desc, data_)
- if frame_name !== nothing
- desc["frame_name"] = Base.String(frame_name)
+begin
+ begin
+ function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing)
+ local desc
+ tf.with_op_name(name, "_ArrayToList") do
+ desc = tf.NodeDescription("_ArrayToList")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
end
- if is_constant !== nothing
- desc["is_constant"] = Base.Bool(is_constant)
+ begin
+ (input_,) = tf.tf_promote(input_)
end
- if parallel_iterations !== nothing
- desc["parallel_iterations"] = Base.Int(parallel_iterations)
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if out_types !== nothing
+ desc["out_types"] = map(Base.identity, out_types)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing)
+ desc = tf.EagerOp("_ArrayToList")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
end
- end), name, "Enter")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if out_types !== nothing
+ desc["out_types"] = map(Base.identity, out_types)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing)
+ if tf.in_eager_mode()
+ _array_to_list_eager(input_; name=name, N=N, out_types=out_types)
+ else
+ _array_to_list_graph(input_; name=name, N=N, out_types=out_types)
+ end
+ end
+ end
+end
+
"""
- loop_cond(input)
+ expand_dims(input, dim)
"""
-tf.@op function loop_cond(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LoopCond")
- input_ = convert(TensorFlow.Tensor{Bool}, input_)
+begin
+ begin
+ function expand_dims_graph(input_, dim_; name=nothing)
+ local desc
+ tf.with_op_name(name, "ExpandDims") do
+ desc = tf.NodeDescription("ExpandDims")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ dim_ = convert(Tensor{Int32}, dim_)
+ begin
+ dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1)
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ begin
+ (dim_,) = tf.tf_promote(dim_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, dim_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function expand_dims_eager(input_, dim_; name=nothing)
+ desc = tf.EagerOp("ExpandDims")
+ input_ = convert(tf.EagerTensor, input_)
+ dim_ = convert(tf.EagerTensor, dim_)
+ begin
+ begin
tf.add_input(desc, input_)
- end), name, "LoopCond")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ tf.add_input(desc, dim_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ begin
+ desc["Tdim"] = tf.data_type(dim_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(expand_dims, [input_, dim_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expand_dims(input_, dim_; name=nothing)
+ if tf.in_eager_mode()
+ expand_dims_eager(input_, dim_; name=name)
+ else
+ expand_dims_graph(input_, dim_; name=name)
+ end
+ end
end
+end
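
Worth noticing in `expand_dims_graph`: the wrapper subtracts 1 from `dim`, so callers use Julia's 1-based axis numbering while the `ExpandDims` kernel sees the 0-based value. Inserting a new leading axis is therefore `dim = 1` here, where Python TensorFlow would say `axis=0`. A sketch, assuming the wrapper is reachable as `TensorFlow.Ops.expand_dims`:

    using TensorFlow
    const tf = TensorFlow
    sess = tf.Session(tf.Graph())
    x = constant([1.0, 2.0, 3.0])     # shape (3,)
    y = tf.Ops.expand_dims(x, 1)      # 1-based: new axis in front
    size(run(sess, y))                # → (1, 3)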
+
"""
- exit(data)
+ inv_grad(y, dy)
"""
-tf.@op function exit(data_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Exit")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- (data_,) = tf.tf_promote(data_)
- tf.add_input(desc, data_)
- end), name, "Exit")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function inv_grad_graph(y_, dy_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InvGrad") do
+ desc = tf.NodeDescription("InvGrad")
+ begin
+ begin
+ y_ = convert(Tensor{Any}, y_)
+ begin
+ end
+ end
+ begin
+ dy_ = convert(Tensor{Any}, dy_)
+ begin
+ end
+ end
+ begin
+ (y_, dy_) = tf.tf_promote(y_, dy_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
+ begin
+ function inv_grad_eager(y_, dy_; name=nothing)
+ desc = tf.EagerOp("InvGrad")
+ y_ = convert(tf.EagerTensor, y_)
+ dy_ = convert(tf.EagerTensor, dy_)
+ begin
+ begin
+ tf.add_input(desc, y_)
+ end
+ begin
+ tf.add_input(desc, dy_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(y_)
+ end
+ begin
+ desc["T"] = tf.data_type(dy_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv_grad(y_, dy_; name=nothing)
+ if tf.in_eager_mode()
+ inv_grad_eager(y_, dy_; name=name)
+ else
+ inv_grad_graph(y_, dy_; name=name)
+ end
+ end
+ end
+end
+
"""
- next_iteration(data)
+ non_max_suppression(boxes, scores, max_output_size; iou_threshold=0.5)
"""
-tf.@op function next_iteration(data_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("NextIteration")
- data_ = convert(TensorFlow.Tensor{Any}, data_)
- (data_,) = tf.tf_promote(data_)
- tf.add_input(desc, data_)
- end), name, "NextIteration")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing)
+ local desc
+ tf.with_op_name(name, "NonMaxSuppression") do
+ desc = tf.NodeDescription("NonMaxSuppression")
+ begin
+ begin
+ boxes_ = convert(Tensor{Float32}, boxes_)
+ begin
+ end
+ end
+ begin
+ scores_ = convert(Tensor{Float32}, scores_)
+ begin
+ end
+ end
+ begin
+ max_output_size_ = convert(Tensor{Int32}, max_output_size_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ end
+ begin
+ begin
+ if iou_threshold !== nothing
+ desc["iou_threshold"] = Base.identity(iou_threshold)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing)
+ desc = tf.EagerOp("NonMaxSuppression")
+ boxes_ = convert(tf.EagerTensor, boxes_)
+ scores_ = convert(tf.EagerTensor, scores_)
+ max_output_size_ = convert(tf.EagerTensor, max_output_size_)
+ begin
+ begin
+ tf.add_input(desc, boxes_)
+ end
+ begin
+ tf.add_input(desc, scores_)
+ end
+ begin
+ tf.add_input(desc, max_output_size_)
+ end
+ end
+ begin
+ begin
+ if iou_threshold !== nothing
+ desc["iou_threshold"] = Base.identity(iou_threshold)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing)
+ if tf.in_eager_mode()
+ non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold)
+ else
+ non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold)
+ end
+ end
+ end
+end
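
`NonMaxSuppression` greedily keeps the highest-scoring boxes, discarding any box whose intersection-over-union with an already-kept box exceeds `iou_threshold`. A compact pure-Julia reference of that greedy loop, with boxes as `[y1, x1, y2, x2]` rows per the op's convention (an illustrative sketch, not the TensorFlow kernel):

    # Intersection-over-union of two boxes given as [y1, x1, y2, x2].
    function iou(a, b)
        yy1, xx1 = max(a[1], b[1]), max(a[2], b[2])
        yy2, xx2 = min(a[3], b[3]), min(a[4], b[4])
        inter = max(0.0, yy2 - yy1) * max(0.0, xx2 - xx1)
        area(r) = (r[3] - r[1]) * (r[4] - r[2])
        inter / (area(a) + area(b) - inter)
    end

    function nms_reference(boxes::Matrix{Float64}, scores::Vector{Float64},
                           max_output_size::Int; iou_threshold=0.5)
        keep = Int[]
        for i in sortperm(scores, rev=true)        # best score first
            length(keep) == max_output_size && break
            # Keep box i only if it overlaps no kept box beyond the threshold.
            if all(j -> iou(boxes[i, :], boxes[j, :]) <= iou_threshold, keep)
                push!(keep, i)
            end
        end
        keep                                        # indices of selected boxes
    end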
+
"""
- complex(real, imag)
+ l2loss(t)
"""
-tf.@op function complex(real_, imag_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Complex")
- real_ = convert(TensorFlow.Tensor{Float32}, real_)
- imag_ = convert(TensorFlow.Tensor{Float32}, imag_)
- (real_, imag_) = tf.tf_promote(real_, imag_)
- tf.add_input(desc, real_)
- tf.add_input(desc, imag_)
- end), name, "Complex")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function l2loss_graph(t_; name=nothing)
+ local desc
+ tf.with_op_name(name, "L2Loss") do
+ desc = tf.NodeDescription("L2Loss")
+ begin
+ begin
+ t_ = convert(Tensor{Any}, t_)
+ begin
+ end
+ end
+ begin
+ (t_,) = tf.tf_promote(t_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function l2loss_eager(t_; name=nothing)
+ desc = tf.EagerOp("L2Loss")
+ t_ = convert(tf.EagerTensor, t_)
+ begin
+ begin
+ tf.add_input(desc, t_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(t_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(l2loss, [t_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function l2loss(t_; name=nothing)
+ if tf.in_eager_mode()
+ l2loss_eager(t_; name=name)
+ else
+ l2loss_graph(t_; name=name)
+ end
+ end
end
+end
+
"""
- print(input, data; message=, first_n=-1, summarize=3)
+ resize_area(images, size; align_corners=false)
"""
-tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Print")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- data_ = [convert(TensorFlow.Tensor{Any}, x) for x = data_]
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- tf.add_input(desc, data_)
- if U !== nothing
- desc["U"] = map(Base.identity, U)
+begin
+ begin
+ function resize_area_graph(images_, size_; name=nothing, align_corners=nothing)
+ local desc
+ tf.with_op_name(name, "ResizeArea") do
+ desc = tf.NodeDescription("ResizeArea")
+ begin
+ begin
+ images_ = convert(Tensor{Any}, images_)
+ begin
+ end
end
- if message !== nothing
- desc["message"] = Base.String(message)
+ begin
+ size_ = convert(Tensor{Int32}, size_)
+ begin
+ end
end
- if first_n !== nothing
- desc["first_n"] = Base.Int(first_n)
+ begin
+ (images_,) = tf.tf_promote(images_)
end
- if summarize !== nothing
- desc["summarize"] = Base.Int(summarize)
+ end
+ begin
+ begin
+ tf.add_input(desc, images_)
end
- end), name, "Print")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- reverse_v2(tensor, axis)
-
-
-"""
-tf.@op function reverse_v2(tensor_, axis_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("ReverseV2")
- tensor_ = convert(TensorFlow.Tensor{Any}, tensor_)
- axis_ = convert(TensorFlow.Tensor{Int32}, axis_)
- axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
- (tensor_,) = tf.tf_promote(tensor_)
- (axis_,) = tf.tf_promote(axis_)
- tf.add_input(desc, tensor_)
- tf.add_input(desc, axis_)
- end), name, "ReverseV2")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function resize_area_eager(images_, size_; name=nothing, align_corners=nothing)
+ desc = tf.EagerOp("ResizeArea")
+ images_ = convert(tf.EagerTensor, images_)
+ size_ = convert(tf.EagerTensor, size_)
+ begin
+ begin
+ tf.add_input(desc, images_)
+ end
+ begin
+ tf.add_input(desc, size_)
+ end
+ end
+ begin
+ begin
+ if align_corners !== nothing
+ desc["align_corners"] = Base.Bool(align_corners)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(images_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing)
+ if tf.in_eager_mode()
+ resize_area_eager(images_, size_; name=name, align_corners=align_corners)
+ else
+ resize_area_graph(images_, size_; name=name, align_corners=align_corners)
+ end
+ end
end
+end
+
"""
- size(input; out_type=Int32)
+ sparse_cross(indices, values, shapes, dense_inputs)
"""
-tf.@op function size(input_; name=nothing, out_type=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Size")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
+begin
+ begin
+ function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing)
+ local desc
+ tf.with_op_name(name, "SparseCross") do
+ desc = tf.NodeDescription("SparseCross")
+ begin
+ begin
+ indices_ = [convert(Tensor{Int64}, x) for x = indices_]
+ begin
+ end
+ end
+ begin
+ values_ = [convert(Tensor{Any}, x) for x = values_]
+ begin
+ end
+ end
+ begin
+ shapes_ = [convert(Tensor{Int64}, x) for x = shapes_]
+ begin
+ end
+ end
+ begin
+ dense_inputs_ = [convert(Tensor{Any}, x) for x = dense_inputs_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shapes_)
+ end
+ begin
+ tf.add_input(desc, dense_inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if hashed_output !== nothing
+ desc["hashed_output"] = Base.Bool(hashed_output)
+ end
+ end
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ begin
+ if hash_key !== nothing
+ desc["hash_key"] = Base.Int(hash_key)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if dense_types !== nothing
+ desc["dense_types"] = map(Base.identity, dense_types)
+ end
+ end
+ begin
+ if out_type !== nothing
+ desc["out_type"] = Base.identity(out_type)
+ end
+ end
+ begin
+ if internal_type !== nothing
+ desc["internal_type"] = Base.identity(internal_type)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing)
+ desc = tf.EagerOp("SparseCross")
+ indices_ = convert(tf.EagerTensor, indices_)
+ values_ = convert(tf.EagerTensor, values_)
+ shapes_ = convert(tf.EagerTensor, shapes_)
+ dense_inputs_ = convert(tf.EagerTensor, dense_inputs_)
+ begin
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ begin
+ tf.add_input(desc, shapes_)
+ end
+ begin
+ tf.add_input(desc, dense_inputs_)
+ end
+ end
+ begin
+ begin
+ if N !== nothing
+ desc["N"] = Base.Int(N)
+ end
+ end
+ begin
+ if hashed_output !== nothing
+ desc["hashed_output"] = Base.Bool(hashed_output)
+ end
+ end
+ begin
+ if num_buckets !== nothing
+ desc["num_buckets"] = Base.Int(num_buckets)
+ end
+ end
+ begin
+ if hash_key !== nothing
+ desc["hash_key"] = Base.Int(hash_key)
+ end
+ end
+ begin
+ if sparse_types !== nothing
+ desc["sparse_types"] = map(Base.identity, sparse_types)
+ end
+ end
+ begin
+ if dense_types !== nothing
+ desc["dense_types"] = map(Base.identity, dense_types)
+ end
+ end
+ begin
if out_type !== nothing
desc["out_type"] = Base.identity(out_type)
end
- end), name, "Size")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if internal_type !== nothing
+ desc["internal_type"] = Base.identity(internal_type)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing)
+ if tf.in_eager_mode()
+ sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type)
+ else
+ sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type)
+ end
+ end
+ end
+end
+
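Each imported op now expands to the same three-part pattern seen above: a `_graph` builder that adds a `NodeDescription` to the default graph, an `_eager` builder that constructs a `tf.EagerOp`, executes it immediately, and records a `TapeNode` for backprop, and a thin `tf.@op` wrapper that picks between them at call time. A minimal sketch of that wrapper shape, with the hypothetical names `my_op`, `my_op_graph`, and `my_op_eager` standing in for a generated triple:

function my_op(args...; name=nothing, kwargs...)
    if tf.in_eager_mode()
        my_op_eager(args...; name=name, kwargs...)   # run now, record on the tape
    else
        my_op_graph(args...; name=name, kwargs...)   # add a node to the default graph
    end
end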
"""
- softmax_cross_entropy_with_logits(features, labels)
+ batch_fft3d(input)
"""
-tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- labels_ = convert(TensorFlow.Tensor{Any}, labels_)
- (features_, labels_) = tf.tf_promote(features_, labels_)
- tf.add_input(desc, features_)
- tf.add_input(desc, labels_)
- end), name, "SoftmaxCrossEntropyWithLogits")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+begin
+ begin
+ function batch_fft3d_graph(input_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BatchFFT3D") do
+ desc = tf.NodeDescription("BatchFFT3D")
+ begin
+ begin
+ input_ = convert(Tensor{Complex{Float32}}, input_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
end
- out
end
+ begin
+ function batch_fft3d_eager(input_; name=nothing)
+ desc = tf.EagerOp("BatchFFT3D")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(batch_fft3d, [input_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft3d(input_; name=nothing)
+ if tf.in_eager_mode()
+ batch_fft3d_eager(input_; name=name)
+ else
+ batch_fft3d_graph(input_; name=name)
+ end
+ end
+ end
+end
+
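For reference, a quick eager-mode exercise of the wrapper above. This is a sketch: it assumes the generated op is reachable as `TensorFlow.Ops.batch_fft3d` and that eager execution has been enabled first.

using TensorFlow
const tf = TensorFlow
tf.enable_eager_execution()

# BatchFFT3D expects a complex tensor and transforms the innermost 3 dimensions.
x = constant(randn(ComplexF32, 2, 4, 4, 4))
y = tf.Ops.batch_fft3d(x)   # same shape as x, FFT over the last three dims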
"""
- sparse_softmax_cross_entropy_with_logits(features, labels)
+ random_standard_normal(shape; seed=0, seed2=0)
"""
-tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits")
- features_ = convert(TensorFlow.Tensor{Any}, features_)
- labels_ = convert(TensorFlow.Tensor{Int64}, labels_)
- (features_,) = tf.tf_promote(features_)
- (labels_,) = tf.tf_promote(labels_)
- tf.add_input(desc, features_)
- tf.add_input(desc, labels_)
- end), name, "SparseSoftmaxCrossEntropyWithLogits")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+begin
+ begin
+ function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "RandomStandardNormal") do
+ desc = tf.NodeDescription("RandomStandardNormal")
+ begin
+ begin
+ shape_ = convert(Tensor{Any}, shape_)
+ begin
+ end
+ end
+ begin
+ (shape_,) = tf.tf_promote(shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ desc = tf.EagerOp("RandomStandardNormal")
+ shape_ = convert(tf.EagerTensor, shape_)
+ begin
+ begin
+ tf.add_input(desc, shape_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(shape_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ else
+ random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+ end
+ end
+ end
+end
+
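A sketch of calling the wrapper above in eager mode; `dtype`, `seed`, and `seed2` map onto the attributes set in the builders (passing `Float32` for `dtype` is an assumption about how the attribute setter coerces Julia types).

shape = constant(Int32[2, 3])
z = tf.Ops.random_standard_normal(shape; dtype=Float32, seed=42)  # 2x3 tensor of N(0,1) draws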
"""
- top_kv2(input, k; sorted=true)
+ resource_scatter_mul(resource, indices, updates)
"""
-tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("TopKV2")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- k_ = convert(TensorFlow.Tensor{Int32}, k_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- tf.add_input(desc, k_)
- if sorted !== nothing
- desc["sorted"] = Base.Bool(sorted)
+begin
+ begin
+ function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ local desc
+ tf.with_op_name(name, "ResourceScatterMul") do
+ desc = tf.NodeDescription("ResourceScatterMul")
+ begin
+ begin
+ resource_ = convert(Tensor{Any}, resource_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ updates_ = convert(Tensor{Any}, updates_)
+ begin
+ end
+ end
+ begin
+ (updates_,) = tf.tf_promote(updates_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
end
- end), name, "TopKV2")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+ end
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ desc = tf.EagerOp("ResourceScatterMul")
+ resource_ = convert(tf.EagerTensor, resource_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ updates_ = convert(tf.EagerTensor, updates_)
+ begin
+ begin
+ tf.add_input(desc, resource_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, updates_)
+ end
+ end
+ begin
+ begin
+ if dtype !== nothing
+ desc["dtype"] = Base.identity(dtype)
+ end
+ end
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["dtype"] = tf.data_type(updates_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing)
+ if tf.in_eager_mode()
+ resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+ else
+ resource_scatter_mul_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+ end
+ end
+ end
+end
+
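Note the `indices_ - 1` adjustment in the graph builder above: the generator shifts Julia's 1-based indices to TensorFlow's 0-based convention before wiring them into the op (the eager builder, as written, does not apply the same shift). A hypothetical graph-mode sketch, assuming `v` is the handle of an existing resource variable:

idx = constant([1, 3])                          # 1-based, as Julia code would write it
upd = constant([2.0, 10.0])
op  = tf.Ops.resource_scatter_mul(v, idx, upd)  # scales rows 1 and 3 of v in place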
"""
- in_top_k(predictions, targets)
+ sdca_optimizer(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data; adaptative=false)
"""
-tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("InTopK")
- predictions_ = convert(TensorFlow.Tensor{Float32}, predictions_)
- targets_ = convert(TensorFlow.Tensor{Int32}, targets_)
- (targets_,) = tf.tf_promote(targets_)
- tf.add_input(desc, predictions_)
- tf.add_input(desc, targets_)
- if k !== nothing
- desc["k"] = Base.Int(k)
+begin
+ begin
+ function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
+ local desc
+ tf.with_op_name(name, "SdcaOptimizer") do
+ desc = tf.NodeDescription("SdcaOptimizer")
+ begin
+ begin
+ sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_]
+ begin
+ end
+ end
+ begin
+ sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_]
+ begin
+ end
+ end
+ begin
+ sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_]
+ begin
+ end
+ end
+ begin
+ dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_]
+ begin
+ end
+ end
+ begin
+ example_weights_ = convert(Tensor{Float32}, example_weights_)
+ begin
+ end
+ end
+ begin
+ example_labels_ = convert(Tensor{Float32}, example_labels_)
+ begin
+ end
+ end
+ begin
+ sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_]
+ begin
+ end
+ end
+ begin
+ sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_]
+ begin
+ end
+ end
+ begin
+ dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_]
+ begin
+ end
+ end
+ begin
+ example_state_data_ = convert(Tensor{Float32}, example_state_data_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, sparse_example_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_values_)
+ end
+ begin
+ tf.add_input(desc, dense_features_)
+ end
+ begin
+ tf.add_input(desc, example_weights_)
+ end
+ begin
+ tf.add_input(desc, example_labels_)
+ end
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_weights_)
+ end
+ begin
+ tf.add_input(desc, dense_weights_)
+ end
+ begin
+ tf.add_input(desc, example_state_data_)
+ end
+ end
+ begin
+ begin
+ if loss_type !== nothing
+ desc["loss_type"] = Base.String(loss_type)
+ end
+ end
+ begin
+ if adaptative !== nothing
+ desc["adaptative"] = Base.Bool(adaptative)
+ end
+ end
+ begin
+ if num_sparse_features !== nothing
+ desc["num_sparse_features"] = Base.Int(num_sparse_features)
+ end
+ end
+ begin
+ if num_sparse_features_with_values !== nothing
+ desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values)
+ end
+ end
+ begin
+ if num_dense_features !== nothing
+ desc["num_dense_features"] = Base.Int(num_dense_features)
+ end
+ end
+ begin
+ if l1 !== nothing
+ desc["l1"] = Base.identity(l1)
+ end
+ end
+ begin
+ if l2 !== nothing
+ desc["l2"] = Base.identity(l2)
+ end
+ end
+ begin
+ if num_loss_partitions !== nothing
+ desc["num_loss_partitions"] = Base.Int(num_loss_partitions)
+ end
+ end
+ begin
+ if num_inner_iterations !== nothing
+ desc["num_inner_iterations"] = Base.Int(num_inner_iterations)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
+ desc = tf.EagerOp("SdcaOptimizer")
+            # The list-typed inputs (converted with comprehensions in the graph
+            # builder above) are coerced element-wise; the three scalar inputs
+            # keep the plain conversion.
+            sparse_example_indices_ = [convert(tf.EagerTensor, x) for x in sparse_example_indices_]
+            sparse_feature_indices_ = [convert(tf.EagerTensor, x) for x in sparse_feature_indices_]
+            sparse_feature_values_ = [convert(tf.EagerTensor, x) for x in sparse_feature_values_]
+            dense_features_ = [convert(tf.EagerTensor, x) for x in dense_features_]
+            example_weights_ = convert(tf.EagerTensor, example_weights_)
+            example_labels_ = convert(tf.EagerTensor, example_labels_)
+            sparse_indices_ = [convert(tf.EagerTensor, x) for x in sparse_indices_]
+            sparse_weights_ = [convert(tf.EagerTensor, x) for x in sparse_weights_]
+            dense_weights_ = [convert(tf.EagerTensor, x) for x in dense_weights_]
+            example_state_data_ = convert(tf.EagerTensor, example_state_data_)
+ begin
+ begin
+ tf.add_input(desc, sparse_example_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_feature_values_)
+ end
+ begin
+ tf.add_input(desc, dense_features_)
+ end
+ begin
+ tf.add_input(desc, example_weights_)
+ end
+ begin
+ tf.add_input(desc, example_labels_)
+ end
+ begin
+ tf.add_input(desc, sparse_indices_)
+ end
+ begin
+ tf.add_input(desc, sparse_weights_)
+ end
+ begin
+ tf.add_input(desc, dense_weights_)
+ end
+ begin
+ tf.add_input(desc, example_state_data_)
+ end
+ end
+ begin
+ begin
+ if loss_type !== nothing
+ desc["loss_type"] = Base.String(loss_type)
+ end
+ end
+ begin
+ if adaptative !== nothing
+ desc["adaptative"] = Base.Bool(adaptative)
+ end
+ end
+ begin
+ if num_sparse_features !== nothing
+ desc["num_sparse_features"] = Base.Int(num_sparse_features)
+ end
+ end
+ begin
+ if num_sparse_features_with_values !== nothing
+ desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values)
+ end
+ end
+ begin
+ if num_dense_features !== nothing
+ desc["num_dense_features"] = Base.Int(num_dense_features)
+ end
+ end
+ begin
+ if l1 !== nothing
+ desc["l1"] = Base.identity(l1)
+ end
+ end
+ begin
+ if l2 !== nothing
+ desc["l2"] = Base.identity(l2)
end
- end), name, "InTopK")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if num_loss_partitions !== nothing
+ desc["num_loss_partitions"] = Base.Int(num_loss_partitions)
+ end
+ end
+ begin
+ if num_inner_iterations !== nothing
+ desc["num_inner_iterations"] = Base.Int(num_inner_iterations)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
+ if tf.in_eager_mode()
+ sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations)
+ else
+ sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations)
+ end
+ end
end
+end
+
"""
- fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, shared_name=)
+ zeta(x, q)
"""
-tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("FIFOQueueV2")
- if component_types !== nothing
- desc["component_types"] = map(Base.identity, component_types)
+begin
+ begin
+ function zeta_graph(x_, q_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Zeta") do
+ desc = tf.NodeDescription("Zeta")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
end
- if shapes !== nothing
- desc["shapes"] = map(Base.identity, shapes)
+ begin
+ q_ = convert(Tensor{Any}, q_)
+ begin
+ end
end
- if capacity !== nothing
- desc["capacity"] = Base.Int(capacity)
+ begin
+ (x_, q_) = tf.tf_promote(x_, q_)
end
- if container !== nothing
- desc["container"] = Base.String(container)
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
end
- if shared_name !== nothing
- desc["shared_name"] = Base.String(shared_name)
+ begin
+ tf.add_input(desc, q_)
end
- end), name, "FIFOQueueV2")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function zeta_eager(x_, q_; name=nothing)
+ desc = tf.EagerOp("Zeta")
+ x_ = convert(tf.EagerTensor, x_)
+ q_ = convert(tf.EagerTensor, q_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ begin
+ tf.add_input(desc, q_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ begin
+ desc["T"] = tf.data_type(q_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(zeta, [x_, q_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeta(x_, q_; name=nothing)
+ if tf.in_eager_mode()
+ zeta_eager(x_, q_; name=name)
+ else
+ zeta_graph(x_, q_; name=name)
+ end
+ end
end
+end
+
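`Zeta` computes the Hurwitz zeta function ζ(x, q), which reduces to the Riemann zeta at q = 1, giving an easy eager-mode sanity check (sketch):

x = constant(2.0)
q = constant(1.0)
tf.Ops.zeta(x, q)   # ζ(2, 1) = π²/6 ≈ 1.6449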
"""
- random_shuffle_queue_v2(; shapes=Int64[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container=, shared_name=)
+ sample_distorted_bounding_box(image_size, bounding_boxes; seed=0, seed2=0, min_object_covered=?, aspect_ratio_range=Int64[], area_range=Int64[], max_attempts=100, use_image_if_no_bounding_boxes=false)
"""
-tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("RandomShuffleQueueV2")
- if component_types !== nothing
- desc["component_types"] = map(Base.identity, component_types)
+begin
+ begin
+ function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
+ local desc
+ tf.with_op_name(name, "SampleDistortedBoundingBox") do
+ desc = tf.NodeDescription("SampleDistortedBoundingBox")
+ begin
+ begin
+ image_size_ = convert(Tensor{Any}, image_size_)
+ begin
+ end
end
- if shapes !== nothing
- desc["shapes"] = map(Base.identity, shapes)
+ begin
+ bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_)
+ begin
+ end
end
- if capacity !== nothing
- desc["capacity"] = Base.Int(capacity)
+ begin
+ (image_size_,) = tf.tf_promote(image_size_)
end
- if min_after_dequeue !== nothing
- desc["min_after_dequeue"] = Base.Int(min_after_dequeue)
+ end
+ begin
+ begin
+ tf.add_input(desc, image_size_)
+ end
+ begin
+ tf.add_input(desc, bounding_boxes_)
+ end
+ end
+ begin
+ begin
+ if seed !== nothing
+ desc["seed"] = Base.Int(seed)
+ end
+ end
+ begin
+ if seed2 !== nothing
+ desc["seed2"] = Base.Int(seed2)
+ end
+ end
+ begin
+ if min_object_covered !== nothing
+ desc["min_object_covered"] = Base.identity(min_object_covered)
+ end
+ end
+ begin
+ if aspect_ratio_range !== nothing
+ desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range)
+ end
end
+ begin
+ if area_range !== nothing
+ desc["area_range"] = map(Base.identity, area_range)
+ end
+ end
+ begin
+ if max_attempts !== nothing
+ desc["max_attempts"] = Base.Int(max_attempts)
+ end
+ end
+ begin
+ if use_image_if_no_bounding_boxes !== nothing
+ desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
+ desc = tf.EagerOp("SampleDistortedBoundingBox")
+ image_size_ = convert(tf.EagerTensor, image_size_)
+ bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_)
+ begin
+ begin
+ tf.add_input(desc, image_size_)
+ end
+ begin
+ tf.add_input(desc, bounding_boxes_)
+ end
+ end
+ begin
+ begin
if seed !== nothing
desc["seed"] = Base.Int(seed)
end
+ end
+ begin
if seed2 !== nothing
desc["seed2"] = Base.Int(seed2)
end
- if container !== nothing
- desc["container"] = Base.String(container)
+ end
+ begin
+ if min_object_covered !== nothing
+ desc["min_object_covered"] = Base.identity(min_object_covered)
end
- if shared_name !== nothing
- desc["shared_name"] = Base.String(shared_name)
+ end
+ begin
+ if aspect_ratio_range !== nothing
+ desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range)
end
- end), name, "RandomShuffleQueueV2")
- tf.Tensor(tf.Operation(desc))
- end
-
-"""
- queue_enqueue_v2(handle, components; timeout_ms=-1)
-
-
-"""
-tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("QueueEnqueueV2")
- handle_ = convert(TensorFlow.Tensor{Any}, handle_)
- components_ = [convert(TensorFlow.Tensor{Any}, x) for x = components_]
- tf.add_input(desc, handle_)
- tf.add_input(desc, components_)
- if Tcomponents !== nothing
- desc["Tcomponents"] = map(Base.identity, Tcomponents)
+ end
+ begin
+ if area_range !== nothing
+ desc["area_range"] = map(Base.identity, area_range)
end
- if timeout_ms !== nothing
- desc["timeout_ms"] = Base.Int(timeout_ms)
+ end
+ begin
+ if max_attempts !== nothing
+ desc["max_attempts"] = Base.Int(max_attempts)
end
- end), name, "QueueEnqueueV2")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if use_image_if_no_bounding_boxes !== nothing
+ desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(image_size_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sample_distorted_bounding_box, [image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing)
+ if tf.in_eager_mode()
+ sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes)
+ else
+ sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes)
+ end
+ end
+ end
+end
+
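A sketch of calling this wrapper: the op returns three tensors (crop begin, crop size, and the chosen box), which is why the graph builder collects `out_idx = 1:3` above. The literal values here are illustrative only.

img_shape = constant(Int32[480, 640, 3])
boxes = constant(reshape(Float32[0.1, 0.1, 0.9, 0.9], (1, 1, 4)))  # [y_min, x_min, y_max, x_max]
crop_begin, crop_size, box = tf.Ops.sample_distorted_bounding_box(
    img_shape, boxes; min_object_covered=0.1, use_image_if_no_bounding_boxes=true)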
"""
- queue_enqueue_many_v2(handle, components; timeout_ms=-1)
+ igamma_grad_a(a, x)
"""
-tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("QueueEnqueueManyV2")
- handle_ = convert(TensorFlow.Tensor{Any}, handle_)
- components_ = [convert(TensorFlow.Tensor{Any}, x) for x = components_]
- tf.add_input(desc, handle_)
- tf.add_input(desc, components_)
- if Tcomponents !== nothing
- desc["Tcomponents"] = map(Base.identity, Tcomponents)
+begin
+ begin
+ function igamma_grad_a_graph(a_, x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "IgammaGradA") do
+ desc = tf.NodeDescription("IgammaGradA")
+ begin
+ begin
+ a_ = convert(Tensor{Any}, a_)
+ begin
+ end
end
- if timeout_ms !== nothing
- desc["timeout_ms"] = Base.Int(timeout_ms)
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (a_, x_) = tf.tf_promote(a_, x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_)
end
- end), name, "QueueEnqueueManyV2")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- queue_size_v2(handle)
-
-
-"""
-tf.@op function queue_size_v2(handle_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("QueueSizeV2")
- handle_ = convert(TensorFlow.Tensor{Any}, handle_)
- tf.add_input(desc, handle_)
- end), name, "QueueSizeV2")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function igamma_grad_a_eager(a_, x_; name=nothing)
+ desc = tf.EagerOp("IgammaGradA")
+ a_ = convert(tf.EagerTensor, a_)
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, a_)
+ end
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(a_)
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma_grad_a(a_, x_; name=nothing)
+ if tf.in_eager_mode()
+ igamma_grad_a_eager(a_, x_; name=name)
+ else
+ igamma_grad_a_graph(a_, x_; name=name)
+ end
+ end
+ end
+end
+
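`IgammaGradA` is the gradient of the regularized lower incomplete gamma function P(a, x) with respect to its first argument, and mainly exists to back `igamma`'s gradient. A quick eager sketch:

a = constant(2.0)
x = constant(1.0)
tf.Ops.igamma_grad_a(a, x)   # ∂P(a, x)/∂a evaluated at a = 2, x = 1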
"""
- queue_close_v2(handle; cancel_pending_enqueues=false)
+ segment_max(data, segment_ids)
"""
-tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("QueueCloseV2")
- handle_ = convert(TensorFlow.Tensor{Any}, handle_)
- tf.add_input(desc, handle_)
- if cancel_pending_enqueues !== nothing
- desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
+begin
+ begin
+ function segment_max_graph(data_, segment_ids_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SegmentMax") do
+ desc = tf.NodeDescription("SegmentMax")
+ begin
+ begin
+ data_ = convert(Tensor{Any}, data_)
+ begin
+ end
+ end
+ begin
+ segment_ids_ = convert(Tensor{Any}, segment_ids_)
+ begin
+ segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+ end
+ end
+ begin
+ (data_,) = tf.tf_promote(data_)
+ end
+ begin
+ (segment_ids_,) = tf.tf_promote(segment_ids_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, data_)
end
- end), name, "QueueCloseV2")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
-
-"""
- lin_space(start, stop, num)
-
-
-"""
-tf.@op function lin_space(start_, stop_, num_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("LinSpace")
- start_ = convert(TensorFlow.Tensor{Any}, start_)
- stop_ = convert(TensorFlow.Tensor{Any}, stop_)
- num_ = convert(TensorFlow.Tensor{Int32}, num_)
- num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1)
- (start_, stop_) = tf.tf_promote(start_, stop_)
- (num_,) = tf.tf_promote(num_)
- tf.add_input(desc, start_)
- tf.add_input(desc, stop_)
- tf.add_input(desc, num_)
- end), name, "LinSpace")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function segment_max_eager(data_, segment_ids_; name=nothing)
+ desc = tf.EagerOp("SegmentMax")
+ data_ = convert(tf.EagerTensor, data_)
+ segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+ begin
+ begin
+ tf.add_input(desc, data_)
+ end
+ begin
+ tf.add_input(desc, segment_ids_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(data_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(segment_ids_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_max(data_, segment_ids_; name=nothing)
+ if tf.in_eager_mode()
+ segment_max_eager(data_, segment_ids_; name=name)
+ else
+ segment_max_graph(data_, segment_ids_; name=name)
+ end
+ end
+ end
+end
+
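As with the scatter op earlier, the `segment_ids_ - 1` line shifts 1-based segment labels to TensorFlow's 0-based ones in the graph builder, so contiguous Julia-style ids work directly there (the eager builder does not apply the shift). A graph-mode sketch:

data = constant([1.0, 3.0, 2.0, 5.0])
ids  = constant([1, 1, 2, 2])     # two segments, labelled 1-based
tf.Ops.segment_max(data, ids)     # per-segment maxima: [3.0, 5.0]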
"""
range(start, limit, delta)
"""
-tf.@op function range(start_, limit_, delta_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Range")
- start_ = convert(TensorFlow.Tensor{Int32}, start_)
- limit_ = convert(TensorFlow.Tensor{Int32}, limit_)
- delta_ = convert(TensorFlow.Tensor{Int32}, delta_)
- (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_)
+begin
+ begin
+ function range_graph(start_, limit_, delta_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Range") do
+ desc = tf.NodeDescription("Range")
+ begin
+ begin
+ start_ = convert(Tensor{Int32}, start_)
+ begin
+ end
+ end
+ begin
+ limit_ = convert(Tensor{Int32}, limit_)
+ begin
+ end
+ end
+ begin
+ delta_ = convert(Tensor{Int32}, delta_)
+ begin
+ end
+ end
+ begin
+ (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, start_)
+ end
+ begin
+ tf.add_input(desc, limit_)
+ end
+ begin
+ tf.add_input(desc, delta_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function range_eager(start_, limit_, delta_; name=nothing)
+ desc = tf.EagerOp("Range")
+ start_ = convert(tf.EagerTensor, start_)
+ limit_ = convert(tf.EagerTensor, limit_)
+ delta_ = convert(tf.EagerTensor, delta_)
+ begin
+ begin
tf.add_input(desc, start_)
+ end
+ begin
tf.add_input(desc, limit_)
+ end
+ begin
tf.add_input(desc, delta_)
- end), name, "Range")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tidx"] = tf.data_type(start_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(limit_)
+ end
+ begin
+ desc["Tidx"] = tf.data_type(delta_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range(start_, limit_, delta_; name=nothing)
+ if tf.in_eager_mode()
+ range_eager(start_, limit_, delta_; name=name)
+ else
+ range_graph(start_, limit_, delta_; name=name)
+ end
+ end
end
+end
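`Range` mirrors TensorFlow's half-open range; unlike the index-style inputs above, none of `start`, `limit`, or `delta` is shifted. Sketch:

tf.Ops.range(constant(Int32(0)), constant(Int32(10)), constant(Int32(2)))  # [0, 2, 4, 6, 8]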
-"""
- fill(dims, value; index_type=Int32)
+"""
+ retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; table_id=-1, table_name=)
+Retrieve embedding parameters for a single table.
"""
-tf.@op function fill(dims_, value_; name=nothing, index_type=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Fill")
- dims_ = convert(TensorFlow.Tensor{Int32}, dims_)
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (value_,) = tf.tf_promote(value_)
- (dims_,) = tf.tf_promote(dims_)
- tf.add_input(desc, dims_)
- tf.add_input(desc, value_)
- if index_type !== nothing
- desc["index_type"] = Base.identity(index_type)
+begin
+ begin
+ function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
end
- end), name, "Fill")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
+ end
+ end
+end
+
"""
- squeeze(input; squeeze_dims=Int64[])
+ flush_summary_writer(writer)
"""
-tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Squeeze")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if squeeze_dims !== nothing
- desc["squeeze_dims"] = map(Base.identity, squeeze_dims)
+begin
+ begin
+ function flush_summary_writer_graph(writer_; name=nothing)
+ local desc
+ tf.with_op_name(name, "FlushSummaryWriter") do
+ desc = tf.NodeDescription("FlushSummaryWriter")
+ begin
+ begin
+ writer_ = convert(Tensor{Any}, writer_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, writer_)
end
- end), name, "Squeeze")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function flush_summary_writer_eager(writer_; name=nothing)
+ desc = tf.EagerOp("FlushSummaryWriter")
+ writer_ = convert(tf.EagerTensor, writer_)
+ begin
+ begin
+ tf.add_input(desc, writer_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flush_summary_writer(writer_; name=nothing)
+ if tf.in_eager_mode()
+ flush_summary_writer_eager(writer_; name=name)
+ else
+ flush_summary_writer_graph(writer_; name=name)
+ end
+ end
end
+end
+
"""
- unpack(value; axis=0)
+ dequantize(input, min_range, max_range; mode=)
"""
-tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Unpack")
- value_ = convert(TensorFlow.Tensor{Any}, value_)
- (value_,) = tf.tf_promote(value_)
- tf.add_input(desc, value_)
- if num !== nothing
- desc["num"] = Base.Int(num)
+begin
+ begin
+ function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing)
+ local desc
+ tf.with_op_name(name, "Dequantize") do
+ desc = tf.NodeDescription("Dequantize")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
end
- if axis !== nothing
- axis = Base.Int(axis) - 1
+ begin
+ min_range_ = convert(Tensor{Float32}, min_range_)
+ begin
+ end
end
- if axis !== nothing
- desc["axis"] = Base.Int(axis)
+ begin
+ max_range_ = convert(Tensor{Float32}, max_range_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_range_)
+ end
+ begin
+ tf.add_input(desc, max_range_)
end
- end), name, "Unpack")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:num
- push!(out, tf.Tensor(op, out_idx))
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing)
+ desc = tf.EagerOp("Dequantize")
+ input_ = convert(tf.EagerTensor, input_)
+ min_range_ = convert(tf.EagerTensor, min_range_)
+ max_range_ = convert(tf.EagerTensor, max_range_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ begin
+ tf.add_input(desc, min_range_)
+ end
+ begin
+ tf.add_input(desc, max_range_)
+ end
+ end
+ begin
+ begin
+ if mode !== nothing
+ desc["mode"] = Base.String(mode)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
end
- out
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing)
+ if tf.in_eager_mode()
+ dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode)
+ else
+ dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode)
+ end
+ end
+ end
+end
+
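`Dequantize` maps quantized integers back to floats over `[min_range, max_range]`; with the default "MIN_COMBINED" mode a quint8 value x becomes min + x * (max - min) / 255. Sketch:

q  = constant(UInt8[0, 128, 255])
lo = constant(-1.0f0)
hi = constant(1.0f0)
tf.Ops.dequantize(q, lo, hi)   # ≈ [-1.0, 0.0039, 1.0]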
"""
- transpose(x, perm)
+ sparse_fill_empty_rows_grad(reverse_index_map, grad_values)
"""
-tf.@op function transpose(x_, perm_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Transpose")
- x_ = convert(TensorFlow.Tensor{Any}, x_)
- perm_ = convert(TensorFlow.Tensor{Int32}, perm_)
- (perm_,) = tf.tf_promote(perm_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
- tf.add_input(desc, perm_)
- end), name, "Transpose")
- tf.Tensor(tf.Operation(desc))
+begin
+ begin
+ function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseFillEmptyRowsGrad") do
+ desc = tf.NodeDescription("SparseFillEmptyRowsGrad")
+ begin
+ begin
+ reverse_index_map_ = convert(Tensor{Int64}, reverse_index_map_)
+ begin
+ end
+ end
+ begin
+ grad_values_ = convert(Tensor{Any}, grad_values_)
+ begin
+ end
+ end
+ begin
+ (grad_values_,) = tf.tf_promote(grad_values_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, reverse_index_map_)
+ end
+ begin
+ tf.add_input(desc, grad_values_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing)
+ desc = tf.EagerOp("SparseFillEmptyRowsGrad")
+ reverse_index_map_ = convert(tf.EagerTensor, reverse_index_map_)
+ grad_values_ = convert(tf.EagerTensor, grad_values_)
+ begin
+ begin
+ tf.add_input(desc, reverse_index_map_)
+ end
+ begin
+ tf.add_input(desc, grad_values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(grad_values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name)
+ else
+ sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name)
+ end
+ end
end
+end
+
"""
- slice(input, begin, size)
+ iterator_get_next(iterator)
"""
-tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Slice")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- begin_ = convert(TensorFlow.Tensor{Any}, begin_)
- begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
- size_ = convert(TensorFlow.Tensor{Any}, size_)
- (input_,) = tf.tf_promote(input_)
- (begin_, size_) = tf.tf_promote(begin_, size_)
- tf.add_input(desc, input_)
- tf.add_input(desc, begin_)
- tf.add_input(desc, size_)
- if Index !== nothing
- desc["Index"] = Base.identity(Index)
+begin
+ begin
+ function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ local desc
+ tf.with_op_name(name, "IteratorGetNext") do
+ desc = tf.NodeDescription("IteratorGetNext")
+ begin
+ begin
+ iterator_ = convert(Tensor{Any}, iterator_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
+ end
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ desc = tf.EagerOp("IteratorGetNext")
+ iterator_ = convert(tf.EagerTensor, iterator_)
+ begin
+ begin
+ tf.add_input(desc, iterator_)
+ end
+ end
+ begin
+ begin
+ if output_types !== nothing
+ desc["output_types"] = map(Base.identity, output_types)
end
- end), name, "Slice")
- tf.Tensor(tf.Operation(desc))
+ end
+ begin
+ if output_shapes !== nothing
+ desc["output_shapes"] = map(Base.identity, output_shapes)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+ if tf.in_eager_mode()
+ iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes)
+ else
+ iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes)
+ end
+ end
+ end
+end
+
"""
- rank(input)
+ prevent_gradient(input; message=)
"""
-tf.@op function rank(input_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Rank")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
+begin
+ begin
+ function prevent_gradient_graph(input_; name=nothing, message=nothing)
+ local desc
+ tf.with_op_name(name, "PreventGradient") do
+ desc = tf.NodeDescription("PreventGradient")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if message !== nothing
+ desc["message"] = Base.String(message)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function prevent_gradient_eager(input_; name=nothing, message=nothing)
+ desc = tf.EagerOp("PreventGradient")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
tf.add_input(desc, input_)
- end), name, "Rank")
- tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ begin
+ if message !== nothing
+ desc["message"] = Base.String(message)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prevent_gradient(input_; name=nothing, message=nothing)
+ if tf.in_eager_mode()
+ prevent_gradient_eager(input_; name=name, message=message)
+ else
+ prevent_gradient_graph(input_; name=name, message=message)
+ end
+ end
+ end
+end
+
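`PreventGradient` is an identity in the forward pass that raises an error (carrying `message`) if anything tries to differentiate through it. Sketch:

x = constant([1.0, 2.0])
y = tf.Ops.prevent_gradient(x; message="this path is not differentiable")
# y == x in the forward pass; requesting gradients through y should error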
"""
- conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1])
+ sparse_tensor_dense_add(a_indices, a_values, a_shape, b)
"""
-tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Conv2DBackpropInput")
- input_sizes_ = convert(TensorFlow.Tensor{Int32}, input_sizes_)
- filter_ = convert(TensorFlow.Tensor{Any}, filter_)
- out_backprop_ = convert(TensorFlow.Tensor{Any}, out_backprop_)
- (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_)
- tf.add_input(desc, input_sizes_)
- tf.add_input(desc, filter_)
- tf.add_input(desc, out_backprop_)
- if strides !== nothing
- desc["strides"] = map(Base.identity, strides)
+begin
+ begin
+ function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing)
+ local desc
+ tf.with_op_name(name, "SparseTensorDenseAdd") do
+ desc = tf.NodeDescription("SparseTensorDenseAdd")
+ begin
+ begin
+ a_indices_ = convert(Tensor{Any}, a_indices_)
+ begin
+ a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1)
+ end
end
- if use_cudnn_on_gpu !== nothing
- desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu)
+ begin
+ a_values_ = convert(Tensor{Any}, a_values_)
+ begin
+ end
end
- if padding !== nothing
- desc["padding"] = Base.String(padding)
+ begin
+ a_shape_ = convert(Tensor{Any}, a_shape_)
+ begin
+ a_shape_ = a_shape_ - convert(tf.Tensor{eltype(a_shape_)}, 1)
+ end
end
- if data_format !== nothing
- desc["data_format"] = Base.String(data_format)
+ begin
+ b_ = convert(Tensor{Any}, b_)
+ begin
+ end
end
- if dilations !== nothing
- desc["dilations"] = map(Base.identity, dilations)
+ begin
+ (a_values_, b_) = tf.tf_promote(a_values_, b_)
+ end
+ begin
+ (a_indices_, a_shape_) = tf.tf_promote(a_indices_, a_shape_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
end
- end), name, "Conv2DBackpropInput")
- tf.Tensor(tf.Operation(desc))
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
end
+ begin
+ function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing)
+ desc = tf.EagerOp("SparseTensorDenseAdd")
+ a_indices_ = convert(tf.EagerTensor, a_indices_)
+ a_values_ = convert(tf.EagerTensor, a_values_)
+ a_shape_ = convert(tf.EagerTensor, a_shape_)
+ b_ = convert(tf.EagerTensor, b_)
+ begin
+ begin
+ tf.add_input(desc, a_indices_)
+ end
+ begin
+ tf.add_input(desc, a_values_)
+ end
+ begin
+ tf.add_input(desc, a_shape_)
+ end
+ begin
+ tf.add_input(desc, b_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tindices"] = tf.data_type(a_indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(a_values_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(a_shape_)
+ end
+ begin
+ desc["T"] = tf.data_type(b_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing)
+ if tf.in_eager_mode()
+ sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name)
+ else
+ sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name)
+ end
+ end
+ end
+end
+
"""
- svd(input; compute_uv=true, full_matrices=false)
+ lookup_table_export(table_handle)
"""
-tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Svd")
- input_ = convert(TensorFlow.Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if compute_uv !== nothing
- desc["compute_uv"] = Base.Bool(compute_uv)
+begin
+ begin
+ function lookup_table_export_graph(table_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableExport") do
+ desc = tf.NodeDescription("LookupTableExport")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
end
- if full_matrices !== nothing
- desc["full_matrices"] = Base.Bool(full_matrices)
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
end
- end), name, "Svd")
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:3
- push!(out, tf.Tensor(op, out_idx))
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
end
- out
end
-
-"""
- cross(a, b)
-
-
-"""
-tf.@op function cross(a_, b_; name=nothing)
- local desc
- tf.with_op_name((()->begin
- desc = tf.NodeDescription("Cross")
- a_ = convert(TensorFlow.Tensor{Any}, a_)
- b_ = convert(TensorFlow.Tensor{Any}, b_)
- (a_, b_) = tf.tf_promote(a_, b_)
- tf.add_input(desc, a_)
- tf.add_input(desc, b_)
- end), name, "Cross")
- tf.Tensor(tf.Operation(desc))
+ begin
+ function lookup_table_export_eager(table_handle_; name=nothing)
+ desc = tf.EagerOp("LookupTableExport")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export(table_handle_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_export_eager(table_handle_; name=name)
+ else
+ lookup_table_export_graph(table_handle_; name=name)
+ end
+ end
end
+end
+
end
diff --git a/src/ops/math.jl b/src/ops/math.jl
index a7a52e72..fe271aa5 100644
--- a/src/ops/math.jl
+++ b/src/ops/math.jl
@@ -27,18 +27,18 @@ import .Ops:
segment_prod
-@op Base.argmin(n::AbstractTensor, dim; name=nothing) = Ops.arg_min(n, dim; name=name)+1
+@op Base.argmin(n::AbstractTensor, dim; name = nothing) = Ops.arg_min(n, dim; name = name) + 1
-@op Base.argmax(n::AbstractTensor, dim; name=nothing) = Ops.arg_max(n, dim; name=name)+1
+@op Base.argmax(n::AbstractTensor, dim; name = nothing) = Ops.arg_max(n, dim; name = name) + 1
@op Base.max(x::AbstractTensor, y; kwargs...) = Ops.maximum(x, y; kwargs...)
@op Base.min(x::AbstractTensor, y; kwargs...) = Ops.minimum(x, y; kwargs...)
-@op function LinearAlgebra.svd(a::AbstractTensor; full=false, kwargs...)
+@op function LinearAlgebra.svd(a::AbstractTensor; full = false, kwargs...)
# Match Base names and ordering of results
- s,u,v = Ops.svd(a; compute_uv=true, full_matrices=full, kwargs...)
- u,s,v
+ s, u, v = Ops.svd(a; compute_uv = true, full_matrices = full, kwargs...)
+ u, s, v
end
@@ -68,7 +68,7 @@ const matmul = mat_mul
Broadcast.broadcasted(::typeof(Base.literal_pow), ::typeof(^), x::AbstractTensor, y::Val{T}) where T = x^Tensor(T)
-@op function batch_matmul(x::AbstractTensor,y::AbstractTensor; adj_x=false, adj_y=false, name=nothing)
+@op function batch_matmul(x::AbstractTensor, y::AbstractTensor; adj_x = false, adj_y = false, name = nothing)
if tf_version() >= v"1.0.0-"
Base.depwarn("""
batch_matmul is deprecated. Its functionality is now subsumed by matmul.
@@ -102,8 +102,9 @@ Args:
Returns:
A `Tensor`. Has the same type as `x`.
- """
-@op function squared_difference(x, y; name=nothing)
+
+"""
+@op function squared_difference(x, y; name = nothing)
local desc
with_op_name(name, "SquaredDifference") do
x = Tensor(x)
@@ -119,9 +120,9 @@ end
Ops.cross(n1, n2; kwargs...)
end
-*(x::Number, n::AbstractTensor) = x.*n # For supporting notation like `2x`
+*(x::Number, n::AbstractTensor) = x .* n # For supporting notation like `2x`
-^(n::AbstractTensor, x::Int) = invoke(^, Tuple{AbstractTensor, Any}, n, x)
+^(n::AbstractTensor, x::Int) = invoke(^, Tuple{AbstractTensor,Any}, n, x)
for jl_func_name in [
:log,
@@ -186,7 +187,7 @@ for (jl_func_name, tf_func_name) in [
@eval @define_unary LinearAlgebra.$jl_func_name Ops.$tf_func_name
end
-function LinearAlgebra.diagm(kv::Pair{T, S}) where {T<:Integer, S<:AbstractTensor}
+function LinearAlgebra.diagm(kv::Pair{T,S}) where {T <: Integer,S <: AbstractTensor}
if kv.first == 0
return Ops.diag(kv.second)
end
@@ -197,36 +198,46 @@ end
# TODO Clean this up
for reduction in [:sum, :prod, :min, :max, :all, :any, :mean]
- @eval @op function $(Symbol("reduce_", reduction))(n::AbstractTensor; axis=nothing, keep_dims=false, name=nothing)
+ @eval @op function $(Symbol("reduce_", reduction))(n::AbstractTensor; axis = nothing, keep_dims = false, name = nothing)
if name === nothing
name = get_name("reduce")
end
- if axis == nothing
- n = Tensor(n) # TODO: rewrite this
- range_start = constant(Int32(0))
- range_delta = constant(Int32(1))
- desc = NodeDescription("Rank", "$name/rank")
- add_input(desc, n)
- rank = Tensor(Operation(desc), 1)
- desc = NodeDescription("Range", "$name/range")
- add_input(desc, range_start)
- add_input(desc, rank)
- add_input(desc, range_delta)
- range = Tensor(Operation(desc), 1)
- desc = NodeDescription($(capitalize(reduction)), name)
- add_input(desc, n)
- add_input(desc, range)
- Tensor(Operation(desc), 1)
+ if in_eager_mode()
+ if axis === nothing
+ n_value = convert(Array, n) # TODO use shape functions instead
+ num_axis = length(size(n_value))
+ axis = Ops.range(constant(0), constant(num_axis), constant(1))
+ fn = Ops.$reduction
+ fn(n, axis, keep_dims = keep_dims)
+ end # TODO else case
else
- if isa(axis, Number)
- axis = [axis]
+        if axis === nothing
+ n = Tensor(n) # TODO: rewrite this
+ range_start = constant(Int32(0))
+ range_delta = constant(Int32(1))
+ desc = NodeDescription("Rank", "$name/rank")
+ add_input(desc, n)
+ rank = Tensor(Operation(desc), 1)
+ desc = NodeDescription("Range", "$name/range")
+ add_input(desc, range_start)
+ add_input(desc, rank)
+ add_input(desc, range_delta)
+ range = Tensor(Operation(desc), 1)
+ desc = NodeDescription($(capitalize(reduction)), name)
+ add_input(desc, n)
+ add_input(desc, range)
+ Tensor(Operation(desc), 1)
+ else
+ if isa(axis, Number)
+ axis = [axis]
+ end
+ axis = [Int32(idx - 1) for idx in axis]
+ desc = NodeDescription($(capitalize(reduction)), name)
+ add_input(desc, Tensor(n))
+ add_input(desc, Tensor(axis))
+ desc["keep_dims"] = keep_dims
+ Tensor(Operation(desc), 1)
end
- axis = [Int32(idx-1) for idx in axis]
- desc = NodeDescription($(capitalize(reduction)), name)
- add_input(desc, Tensor(n))
- add_input(desc, Tensor(axis))
- desc["keep_dims"] = keep_dims
- Tensor(Operation(desc), 1)
end
end
end
@@ -241,7 +252,7 @@ for (jl_func, tf_func) in [
(:(Base.any), :reduce_any),
(:(Statistics.mean), :reduce_mean),
]
- @eval function $jl_func(n::AbstractTensor, axis=nothing; kwargs...)
- $tf_func(n; axis=axis, kwargs...)
+ @eval function $jl_func(n::AbstractTensor, axis = nothing; kwargs...)
+ $tf_func(n; axis = axis, kwargs...)
end
end
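+
+# Usage sketch: with these mappings `using Statistics; mean(t)` lowers to
+# `reduce_mean(t)`, reducing over every axis unless `axis` is given.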
diff --git a/src/ops/op_names.txt b/src/ops/op_names.txt
new file mode 100644
index 00000000..60222d70
--- /dev/null
+++ b/src/ops/op_names.txt
@@ -0,0 +1,1148 @@
+ReduceJoin
+ReduceDataset
+TensorListFromTensor
+ExtractJpegShape
+Svd
+IteratorGetNextSync
+RefEnter
+Erf
+LookupTableExportV2
+Round
+OutfeedDequeue
+TensorForestTreeIsInitializedOp
+Merge
+HistogramFixedWidth
+Asin
+Any
+RsqrtGrad
+TensorArrayScatter
+DynamicPartition
+ExperimentalPrivateThreadPoolDataset
+ReaderSerializeState
+RightShift
+AvgPool3D
+EncodePng
+DebugIdentity
+Imag
+ResourceSparseApplyFtrlV2
+StageClear
+Sign
+PopulationCount
+Neg
+AnonymousIterator
+SparseReduceSum
+FilterDataset
+StringLength
+Conv3D
+RetrieveTPUEmbeddingAdagradParameters
+OptionalHasValue
+ApplyAdam
+CudnnRNNParamsToCanonical
+IRFFT3D
+Angle
+TensorForestTreeResourceHandleOp
+LearnedUnigramCandidateSampler
+_Arg
+MatrixSquareRoot
+SparseDenseCwiseMul
+TensorArrayConcatV3
+UnicodeScript
+BatchCholeskyGrad
+Mean
+BatchFFT
+Sin
+BoostedTreesEnsembleResourceHandleOp
+QuantizedMaxPool
+OrderedMapStage
+PartitionedCall
+SparseApplyAdagrad
+DecodeProtoV2
+Betainc
+GuaranteeConst
+DecodeBmp
+BoostedTreesBucketize
+ShutdownDistributedTPU
+ExperimentalStatsAggregatorSummary
+Timestamp
+MatrixExponential
+Size
+AddN
+SparseSegmentSum
+BatchDataset
+RecordInput
+QueueDequeueUpToV2
+RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug
+LoadTPUEmbeddingRMSPropParametersGradAccumDebug
+SerializeTensor
+Mul
+SoftmaxCrossEntropyWithLogits
+ResourceScatterDiv
+FixedLengthRecordDatasetV2
+SkipDataset
+Cosh
+FusedBatchNormV2
+TensorArraySplit
+CTCLoss
+QuantizedReshape
+FloorDiv
+TensorArrayV2
+BarrierClose
+ReadVariableOp
+QuantizedMul
+Selu
+CudnnRNNBackpropV3
+LookupTableInsert
+ComplexAbs
+TridiagonalSolve
+LookupTableImport
+Abs
+ResourceApplyAdam
+WriteHistogramSummary
+ExperimentalIndexedDatasetMaterialize
+_HostSend
+Greater
+NcclBroadcast
+TensorListPushBackBatch
+ResourceScatterMin
+Slice
+UnicodeDecode
+TakeDataset
+BoostedTreesMakeStatsSummary
+AllCandidateSampler
+Conv2DBackpropInput
+DatasetToSingleElement
+CacheDataset
+FakeQuantWithMinMaxVarsGradient
+FusedResizeAndPadConv2D
+Batch
+CollectiveBcastRecv
+BatchToSpaceND
+LoopCond
+DepthToSpace
+DestroyTemporaryVariable
+CudnnRNN
+RefIdentity
+MaxPool3DGrad
+LoadTPUEmbeddingMomentumParametersGradAccumDebug
+PaddingFIFOQueueV2
+Conv3DBackpropInput
+RefExit
+MapClear
+EncodeWav
+TensorSummaryV2
+QueueDequeueUpTo
+MatrixBandPart
+Copy
+ShapeN
+ExperimentalParseExampleDataset
+Concat
+DataFormatDimMap
+IdentityReader
+Softplus
+ResourceSparseApplyProximalAdagrad
+ParseSingleSequenceExample
+MatrixDiag
+Fact
+ShardDataset
+MaxPoolGradGrad
+ResizeBilinearGrad
+BatchToSpace
+OptionalFromValue
+Xlogy
+Cross
+BitwiseAnd
+BroadcastTo
+EluGrad
+CudnnRNNBackprop
+StringToHashBucketFast
+MutableHashTable
+Relu
+NthElement
+Softsign
+MutableDenseHashTable
+_ShutdownDistributedTPU
+Polygamma
+NcclReduce
+ArgMax
+MatrixSetDiag
+SpaceToBatchND
+SparseReshape
+OptimizeDataset
+ConcatV2
+ResourceSparseApplyAdadelta
+Tile
+MutexV2
+SerializeManySparse
+TPUEmbeddingActivations
+BatchMatrixSolveLs
+NotEqual
+Lgamma
+TPUReplicateMetadata
+ExperimentalThreadPoolHandle
+SelfAdjointEig
+BoostedTreesQuantileStreamResourceGetBucketBoundaries
+SparseDenseCwiseDiv
+Acos
+All
+CompareAndBitpack
+VarHandleOp
+ExperimentalUniqueDataset
+QuantizedConv2DWithBiasSumAndRelu
+ListDiff
+CreateSummaryFileWriter
+GenerateVocabRemapping
+BatchMatrixInverse
+ControlTrigger
+TPUOrdinalSelector
+StopGradient
+Split
+Unpack
+ResourceScatterMax
+TensorArrayWrite
+Fill
+QuantizedConv2DWithBiasAndRequantize
+Softmax
+ResizeBicubic
+InfeedDequeueTuple
+MultiDeviceIterator
+DecodeCSV
+LookupTableFind
+ShuffleAndRepeatDataset
+RequantizationRangePerChannel
+ExperimentalUnbatchDataset
+AvgPool3DGrad
+PlaceholderWithDefault
+InitializeTableV2
+SetSize
+Assert
+NonMaxSuppressionV4
+SampleDistortedBoundingBoxV2
+InitializeTableFromTextFile
+LookupTableSize
+SparseApplyAdagradDA
+BroadcastGradientArgs
+SummaryWriter
+RecvTPUEmbeddingActivations
+_While
+InitializeTable
+DebugNumericSummary
+RetrieveTPUEmbeddingAdagradParametersGradAccumDebug
+Tanh
+SymbolicGradient
+BoostedTreesUpdateEnsemble
+ApplyMomentum
+ReaderRead
+_WaitForDistributedTPU
+MutexLock
+AccumulatorSetGlobalStep
+QuantizedAdd
+Squeeze
+ExperimentalMatchingFilesDataset
+ExperimentalDatasetToTFRecord
+LoadTPUEmbeddingStochasticGradientDescentParameters
+NoOp
+ZipDataset
+IdentityReaderV2
+LMDBReader
+NcclAllReduce
+TextLineDataset
+SdcaShrinkL1
+TFRecordReaderV2
+MultiDeviceIteratorFromStringHandle
+PaddedBatchDatasetV2
+LoadTPUEmbeddingProximalAdagradParameters
+TensorArraySize
+OrderedMapSize
+StatelessRandomUniform
+SparseToSparseSetOperation
+TensorSummary
+RemoteFusedGraphExecute
+SparseSliceGrad
+Cumsum
+BatchNormWithGlobalNormalizationGrad
+AvgPoolGrad
+RestoreV2
+Relu6
+SparseApplyRMSProp
+_Recv
+MaxPool
+Invert
+_UnaryOpsComposition
+ExperimentalMapDataset
+LoadTPUEmbeddingADAMParameters
+ParseTensor
+ExperimentalMaterializedIndexDatasetHandle
+MultiDeviceIteratorGetNextFromShard
+RandomUniformInt
+SparseSoftmaxCrossEntropyWithLogits
+TensorArrayReadV2
+ReaderReadUpTo
+EncodeProto
+StridedSliceGrad
+_NcclReduceSend
+PaddedBatchDataset
+DataFormatVecPermute
+StringFormat
+AsString
+QueueEnqueueMany
+FakeParam
+ApplyAdagrad
+ExperimentalIteratorGetDevice
+AdjustContrast
+ExtractImagePatches
+ScaleAndTranslate
+OptionalNone
+VariableV2
+Elu
+ScatterUpdate
+FloorMod
+ExperimentalIgnoreErrorsDataset
+ExperimentalSetStatsAggregatorDataset
+ComputeAccidentalHits
+StringToNumber
+Snapshot
+DeserializeIterator
+Atan
+MatMul
+Erfc
+SigmoidGrad
+FixedLengthRecordReaderV2
+NonMaxSuppressionV3
+Dilation2DBackpropInput
+LogicalOr
+ResourceApplyAdadelta
+DenseToSparseSetOperation
+ReaderNumRecordsProduced
+AdjustHue
+BoostedTreesQuantileStreamResourceFlush
+ExperimentalMapAndBatchDataset
+RealDiv
+RestoreSlice
+StackPopV2
+Reverse
+DecodePng
+NonMaxSuppressionV2
+Igamma
+Digamma
+ResourceApplyAdaMax
+SpaceToDepth
+SqrtGrad
+MapUnstage
+Qr
+BoostedTreesCalculateBestGainsPerFeature
+UnbatchGrad
+LogSoftmax
+ResourceCountUpTo
+AccumulateNV2
+ParallelMapDataset
+RandomUniform
+UnicodeTranscode
+ReaderReset
+_NcclBroadcastSend
+BatchMatrixDeterminant
+LessEqual
+ApplyGradientDescent
+SparseSegmentSqrtN
+MatrixLogarithm
+ScatterMul
+DecodeJpeg
+RandomShuffleQueueV2
+QueueEnqueueManyV2
+ResourceSparseApplyCenteredRMSProp
+InterleaveDataset
+StackPop
+MaxPoolV2
+BoostedTreesDeserializeEnsemble
+LoadAndRemapMatrix
+SparseApplyProximalGradientDescent
+PyFuncStateless
+Where
+Mfcc
+CheckNumerics
+TPUCompilationResult
+RetrieveTPUEmbeddingStochasticGradientDescentParameters
+SparseSegmentMeanGrad
+TryRpc
+BatchMatrixTriangularSolve
+_Retval
+UniqueWithCounts
+Add
+ExperimentalScanDataset
+AssignAddVariableOp
+SplitV
+Assign
+MaxPoolWithArgmax
+QuantizedReluX
+RandomShuffleQueue
+FFT2D
+ExperimentalThreadPoolDataset
+ExperimentalDirectedInterleaveDataset
+SparseSegmentSqrtNGrad
+Real
+OrderedMapUnstage
+RFFT2D
+VarIsInitializedOp
+BoostedTreesQuantileStreamResourceHandleOp
+Atan2
+RandomPoisson
+ReverseSequence
+OutfeedEnqueue
+Sub
+StringSplit
+Cumprod
+QuantizedResizeBilinear
+ParseSingleExample
+IsVariableInitialized
+ExperimentalStatsAggregatorHandle
+TensorListConcatV2
+CudnnRNNV2
+ResourceScatterSub
+AssignAdd
+TensorDataset
+Bucketize
+SparseReduceMax
+RetrieveTPUEmbeddingMDLAdagradLightParameters
+TensorArrayGradWithShape
+TensorArrayCloseV3
+NonMaxSuppressionWithOverlaps
+Pack
+TensorArrayGradV2
+AssignSubVariableOp
+BatchFFT2D
+CloseSummaryWriter
+Rank
+FFT3D
+ApplyFtrl
+Abort
+AudioSpectrogram
+VariableShape
+FIFOQueueV2
+Variable
+TensorForestCreateTreeVariable
+MaxPoolGradWithArgmax
+RefSwitch
+SdcaFprint
+ExperimentalChooseFastestDataset
+LeakyRelu
+IdentityN
+CudnnRNNBackpropV2
+RequantizationRange
+Maximum
+Reshape
+MatrixSolveLs
+TFRecordDataset
+BoostedTreesExampleDebugOutputs
+HSVToRGB
+ExperimentalMaxIntraOpParallelismDataset
+ScatterDiv
+DecodeWav
+Log
+SaveV2
+DeepCopy
+ModelDataset
+ParseSequenceExample
+Sinh
+IteratorV2
+TensorArrayWriteV2
+TensorListElementShape
+QueueSizeV2
+Expm1
+BatchMatrixBandPart
+ConcatenateDataset
+DecodeGif
+TPUReplicate
+BatchSelfAdjointEigV2
+Shape
+RepeatDataset
+CropAndResizeGradBoxes
+ReciprocalGrad
+BatchMatrixSolve
+MutableHashTableV2
+Exit
+LRN
+StatelessIf
+TensorListSetItem
+Rsqrt
+QuantizedConv2DWithBiasSumAndReluAndRequantize
+DeleteSessionTensor
+OneHot
+ResourceApplyFtrl
+SdcaOptimizerV2
+QueueEnqueue
+ConditionalAccumulator
+CTCBeamSearchDecoder
+WholeFileReader
+ApplyRMSProp
+AdjustSaturation
+LookupTableRemoveV2
+QueueClose
+PrefetchDataset
+MapDataset
+QuantizedConv2DWithBias
+TensorArrayReadV3
+Identity
+Print
+CollectiveBcastSend
+_ListToArray
+NegTrain
+WorkerHeartbeat
+MergeV2Checkpoints
+CollectivePermute
+QuantizeAndDequantizeV3
+HashTable
+SoftplusGrad
+FixedLengthRecordReader
+TensorArrayScatterV2
+DecodeJSONExample
+FusedBatchNormGradV2
+_HostCast
+TFRecordReader
+While
+StatelessMultinomial
+ScatterAdd
+Conj
+ParallelDynamicStitch
+MakeIterator
+RFFT3D
+SparseReduceSumSparse
+CollectiveGather
+CombinedNonMaxSuppression
+_ScopedAllocator
+LoadTPUEmbeddingAdadeltaParameters
+SparseAdd
+CTCGreedyDecoder
+ImmutableConst
+ConsumeMutexLock
+GreaterEqual
+InitializeTableFromTextFileV2
+QueueDequeue
+Equal
+IteratorFromStringHandle
+TensorListSplit
+FractionalMaxPool
+ScatterNd
+TensorListScatterIntoExistingList
+Select
+Min
+LRNGrad
+RandomPoissonV2
+FIFOQueue
+ResourceSparseApplyProximalGradientDescent
+ExperimentalNonSerializableDataset
+ExperimentalBytesProducedStatsDataset
+Dilation2DBackpropFilter
+_If
+BiasAddGrad
+ReaderSerializeStateV2
+WrapDatasetVariant
+ParallelInterleaveDatasetV2
+DepthwiseConv2dNativeBackpropInput
+ResourceApplyRMSProp
+SparseAccumulatorTakeGradient
+ExperimentalLMDBDataset
+StackCloseV2
+MapSize
+ResourceApplyAdagradDA
+TensorForestTreeSize
+MatrixDiagPart
+ReaderNumWorkUnitsCompletedV2
+TensorArraySplitV3
+SparseToDense
+TPUReplicatedInput
+StackClose
+DeserializeManySparse
+_NcclReduceRecv
+MirrorPadGrad
+BroadcastArgs
+StatelessTruncatedNormal
+RegexFullMatch
+UnwrapDatasetVariant
+Empty
+OutfeedDequeueTuple
+Div
+Barrier
+TruncateDiv
+UnicodeEncode
+MergeSummary
+FakeQueue
+BatchCholesky
+Iterator
+BesselI1e
+ImportEvent
+QuantizedInstanceNorm
+LoadTPUEmbeddingAdagradParameters
+TensorArrayWriteV3
+DenseToDenseSetOperation
+EncodeJpeg
+InplaceUpdate
+FusedPadConv2D
+QuantizedRelu
+GatherNd
+Placeholder
+FilterByLastComponentDataset
+ClipByValue
+ImageSummary
+RetrieveTPUEmbeddingAdadeltaParameters
+StringJoin
+ResourceScatterNdAdd
+BoostedTreesQuantileStreamResourceDeserialize
+LeftShift
+RequantizePerChannel
+TensorScatterAdd
+_VarHandlesOp
+IFFT3D
+EuclideanNorm
+RefSelect
+SparseTensorSliceDataset
+RetrieveTPUEmbeddingFTRLParametersGradAccumDebug
+BatchIFFT2D
+TensorArrayGather
+SparseSegmentMeanWithNumSegments
+EnsureShape
+ApplyProximalGradientDescent
+CollectiveReduce
+IsNan
+ApplyAdaMax
+DecodeAndCropJpeg
+ApplyCenteredRMSProp
+Conv3DBackpropFilterV2
+MatrixTriangularSolve
+ReaderNumWorkUnitsCompleted
+WriteAudioSummary
+ShardedFilespec
+DivNoNan
+SparseAccumulatorApplyGradient
+RaggedTensorToSparse
+ExtractVolumePatches
+BarrierInsertMany
+Const
+SpaceToBatch
+StageSize
+EmptyTensorList
+QuantizedConv2DAndRequantize
+Lu
+DecodeCompressed
+GetSessionTensor
+TensorArrayGatherV3
+LoadTPUEmbeddingFTRLParametersGradAccumDebug
+DestroyResourceOp
+TextLineReader
+CreateSummaryDbWriter
+TanhGrad
+DecodeBase64
+MaxPoolGradGradV2
+AudioSummaryV2
+StatefulPartitionedCall
+_ScopedAllocatorConcat
+FakeQuantWithMinMaxArgsGradient
+BatchSvd
+MapStage
+ResourceSparseApplyFtrl
+ResizeNearestNeighbor
+ExperimentalCSVDataset
+_MklMul
+BatchMatrixDiag
+IsInf
+FixedUnigramCandidateSampler
+SparseApplyFtrlV2
+UnravelIndex
+Max
+IFFT2D
+SparseConcat
+HistogramSummary
+SegmentSum
+Exp
+ConfigureDistributedTPU
+ResourceScatterNdSub
+_XlaSendFromHost
+GetSessionHandleV2
+ReluGrad
+UnsortedSegmentMin
+ParseExample
+QueueEnqueueV2
+ScatterNdAdd
+ReaderNumRecordsProducedV2
+LoadTPUEmbeddingCenteredRMSPropParameters
+AssignSub
+UnsortedSegmentSum
+FusedBatchNormGrad
+MaxPoolGradV2
+QuantizedConv2DWithBiasAndRelu
+BoostedTreesCreateEnsemble
+OrderedMapIncompleteSize
+Skipgram
+ArgMin
+QueueDequeueMany
+BoostedTreesSerializeEnsemble
+Minimum
+Substr
+QueueSize
+ApplyFtrlV2
+LoadTPUEmbeddingMomentumParameters
+SparseSegmentMean
+ResourceApplyProximalAdagrad
+TensorArrayGatherV2
+Less
+HostConst
+UpperBound
+TensorListGetItem
+FakeQuantWithMinMaxVars
+IsBoostedTreesQuantileStreamResourceInitialized
+ReaderReadUpToV2
+Complex
+TensorListReserve
+Bitcast
+PriorityQueue
+QuantizedBatchNormWithGlobalNormalization
+Cos
+QuantizeDownAndShrinkRange
+ExperimentalRandomDataset
+Rpc
+QuantizedConv2DWithBiasSignedSumAndReluAndRequantize
+TensorListLength
+MapIncompleteSize
+StatelessWhile
+SparseConditionalAccumulator
+SegmentMin
+WriteGraphSummary
+CholeskyGrad
+LogUniformCandidateSampler
+SerializeSparse
+ScatterNdNonAliasingAdd
+RefMerge
+TensorListConcat
+CudnnRNNCanonicalToParams
+SparseApplyAdadelta
+TensorArrayClose
+SeluGrad
+CropAndResizeGradImage
+RFFT
+ExperimentalSqlDataset
+ResourceApplyPowerSign
+MatrixDeterminant
+StaticRegexReplace
+AvgPool
+SparseDenseCwiseAdd
+BiasAddV1
+InvertPermutation
+HashTableV2
+SparseApplyMomentum
+InfeedEnqueue
+StatelessRandomUniformInt
+LoadTPUEmbeddingAdadeltaParametersGradAccumDebug
+_Send
+MapPeek
+WriteScalarSummary
+OrderedMapUnstageNoKey
+SparseApplyCenteredRMSProp
+TensorListScatterV2
+Conv3DBackpropInputV2
+RetrieveTPUEmbeddingProximalAdagradParameters
+RandomShuffle
+UniformCandidateSampler
+TensorArraySplitV2
+MutableDenseHashTableV2
+DrawBoundingBoxes
+SparseApplyProximalAdagrad
+RangeDataset
+ReaderRestoreStateV2
+TopKV2
+Atanh
+DebugGradientIdentity
+SparseAddGrad
+ResourceScatterAdd
+Ceil
+Save
+RetrieveTPUEmbeddingCenteredRMSPropParameters
+QuantizedConcat
+ZerosLike
+FractionalAvgPool
+EditDistance
+UniqueV2
+QuantizeAndDequantizeV2
+QuantizeAndDequantize
+TensorListPopBack
+DebugNanCount
+ApplyAdagradDA
+DepthwiseConv2dNative
+SerializeIterator
+DatasetToGraph
+TopK
+ResourceApplyFtrlV2
+_NcclBroadcastRecv
+QueueIsClosed
+ShuffleDataset
+DeserializeSparse
+PriorityQueueV2
+_DeviceArg
+TruncatedNormal
+TensorForestTreePredict
+StackV2
+AccumulatorNumAccumulated
+ReaderResetV2
+ApplyAddSign
+RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug
+Rint
+RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug
+ExtractGlimpse
+StringToHashBucketStrong
+OneShotIterator
+ResourceSparseApplyMomentum
+SaveSlices
+ExperimentalDatasetCardinality
+IsFinite
+ExperimentalNumaMapAndBatchDataset
+AllToAll
+TakeManySparseFromTensorsMap
+BatchMatrixDiagPart
+FixedLengthRecordDataset
+StackPush
+PlaceholderV2
+MultiDeviceIteratorInit
+GcsConfigureBlockCache
+QueueDequeueV2
+RetrieveTPUEmbeddingRMSPropParameters
+Transpose
+IFFT
+SparseSegmentSumWithNumSegments
+QueueIsClosedV2
+ParameterizedTruncatedNormal
+DiagPart
+KmeansPlusPlusInitialization
+RegexReplace
+SparseTensorDenseMatMul
+MapDefun
+ThreadUnsafeUnigramCandidateSampler
+RetrieveTPUEmbeddingADAMParametersGradAccumDebug
+ParallelConcat
+LookupTableFindV2
+TensorForestTreeDeserialize
+RetrieveTPUEmbeddingMomentumParameters
+FakeQuantWithMinMaxArgs
+ResourceApplyGradientDescent
+ExperimentalSlidingWindowDataset
+DecodeRaw
+FakeQuantWithMinMaxVarsPerChannelGradient
+UniqueWithCountsV2
+ExperimentalSleepDataset
+TPUReplicatedOutput
+LowerBound
+Tan
+Enter
+InfeedEnqueueTuple
+_SetGlobalTPUArray
+Square
+DebugGradientRefIdentity
+ApplyAdadelta
+ExperimentalGroupByWindowDataset
+AudioSummary
+SquaredDifference
+ExperimentalTakeWhileDataset
+ScatterNdUpdate
+DynamicStitch
+OnesLike
+FractionalMaxPoolGrad
+RemoteCall
+Gather
+QuantizedMatMul
+UnicodeDecodeWithOffsets
+EnqueueTPUEmbeddingSparseTensorBatch
+AccumulatorApplyGradient
+WriteSummary
+QuantizedConv2D
+ResourceApplyMomentum
+Log1p
+OrderedMapClear
+ResourceScatterUpdate
+BarrierTakeMany
+ResourceApplyKerasMomentum
+GenerateBigQueryReaderPartitions
+_XlaRecvAtHost
+QuantizedAvgPool
+ResourceApplyAdamWithAmsgrad
+TensorListResize
+_HostRecv
+BoostedTreesCenterBias
+LookupTableSizeV2
+IRFFT
+InplaceAdd
+BiasAdd
+LoadTPUEmbeddingADAMParametersGradAccumDebug
+_DisconnectHostFromDistributedTPUSystem
+RaggedRange
+WindowDataset
+Diag
+InfeedDequeue
+ExperimentalLatencyStatsDataset
+AddSparseToTensorsMap
+RaggedGather
+RGBToHSV
+MultiDeviceIteratorToStringHandle
+For
+SparseReduceMaxSparse
+ConcatOffset
+Stage
+Switch
+QueueDequeueManyV2
+SegmentProd
+ApproximateEqual
+Conv2D
+CrossReplicaSum
+SparseMatMul
+_ScopedAllocatorSplit
+Igammac
+BatchMatMul
+EnqueueTPUEmbeddingSparseBatch
+QueueCloseV2
+TensorArrayPack
+ReaderRestoreState
+_FusedConv2D
+_ReadVariablesOp
+MutableHashTableOfTensors
+ReadFile
+LoadTPUEmbeddingMDLAdagradLightParameters
+FractionalAvgPoolGrad
+LoadTPUEmbeddingAdagradParametersGradAccumDebug
+StatefulStandardNormalV2
+Bincount
+Inv
+ApplyProximalAdagrad
+GatherV2
+WriteFile
+BoostedTreesGetEnsembleStates
+ResourceGather
+ResourceApplyProximalGradientDescent
+TruncateMod
+LogMatrixDeterminant
+IRFFT2D
+BoostedTreesTrainingPredict
+NearestNeighbors
+Floor
+LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug
+WriteImageSummary
+TileGrad
+TensorArrayGradV3
+EnqueueTPUEmbeddingIntegerBatch
+FusedBatchNorm
+LogicalAnd
+TensorScatterUpdate
+TextLineReaderV2
+TensorSliceDataset
+TensorArrayScatterV3
+ResizeNearestNeighborGrad
+ApplyPowerSign
+ExperimentalRebatchDataset
+MirrorPad
+LogicalNot
+BatchIFFT
+TensorArrayConcatV2
+Sum
+BoostedTreesPredict
+QuantizedConv2DWithBiasAndReluAndRequantize
+ResourceSparseApplyAdagrad
+LeakyReluGrad
+_DeviceRetval
+Pad
+AddManySparseToTensorsMap
+SparseReorder
+BitwiseXor
+BatchMatrixSetDiag
+LookupTableInsertV2
+ExperimentalDenseToSparseBatchDataset
+ResourceSparseApplyRMSProp
+RandomCrop
+LookupTableImportV2
+ResourceScatterNdUpdate
+StaticRegexFullMatch
+GcsConfigureCredentials
+TensorArraySizeV3
+SparseSegmentSqrtNWithNumSegments
+ExperimentalGroupByReducerDataset
+Conv2DBackpropFilter
+MaxPoolGrad
+_InitializeHostForDistributedTPU
+StagePeek
+PadV2
+_ParallelConcatStart
+PrintV2
+OptionalGetValue
+LoadTPUEmbeddingFTRLParameters
+SparseSlice
+BoostedTreesMakeQuantileSummaries
+MatrixSolve
+_ConfigureDistributedTPU
+AdjustContrastv2
+_MklMaximum
+CudnnRNNParamsSize
+BoostedTreesQuantileStreamResourceAddSummaries
+BatchIFFT3D
+Sigmoid
+SegmentMean
+IsBoostedTreesEnsembleInitialized
+TensorArraySizeV2
+_MklSub
+SendTPUEmbeddingGradients
+MaxPool3D
+Prod
+ExperimentalIdentityIndexedDataset
+TensorListPushBack
+BatchFunction
+SparseFillEmptyRows
+SelfAdjointEigV2
+RetrieveTPUEmbeddingFTRLParameters
+ResourceSparseApplyAdagradDA
+TemporaryVariable
+ResourceApplyAddSign
+Roll
+Xdivy
+MaxPool3DGradGrad
+CropAndResize
+QuantizedBiasAdd
+KMC2ChainInitialization
+MapUnstageNoKey
+ScatterNdSub
+ResizeBilinear
+OrderedMapPeek
+TensorArray
+InplaceSub
+Pow
+StatefulStandardNormal
+RefNextIteration
+ScalarSummary
+StringSplitV2
+BesselI0e
+Unique
+LoadTPUEmbeddingRMSPropParameters
+WholeFileReaderV2
+EagerPyFunc
+NextIteration
+Case
+TensorScatterSub
+ScatterMax
+Sqrt
+AccumulatorTakeGradient
+_MklAdd
+Reciprocal
+OutfeedEnqueueTuple
+StringStrip
+FakeQuantWithMinMaxVarsPerChannel
+BarrierReadySize
+StringToHashBucket
+TensorArrayConcat
+ShardedFilename
+PyFunc
+UnsortedSegmentProd
+CountUpTo
+RandomGamma
+TensorArrayGrad
+Dilation2D
+Unbatch
+GetSessionHandle
+RetrieveTPUEmbeddingADAMParameters
+MutableHashTableOfTensorsV2
+SparseApplyFtrl
+BatchDatasetV2
+SparseSparseMinimum
+ReverseV2
+StridedSlice
+MatchingFiles
+EncodeBase64
+IteratorGetNextAsOptional
+PaddingFIFOQueue
+IteratorToStringHandle
+MaxPoolGradGradWithArgmax
+TensorListGather
+Multinomial
+TensorArrayRead
+ExperimentalIndexedDatasetGet
+TPUPartitionedCall
+QuantizedConv2DAndReluAndRequantize
+IteratorFromStringHandleV2
+BitwiseOr
+UnsortedSegmentMax
+_MklSquaredDifference
+Conv3DBackpropFilter
+If
+FlatMapDataset
+TensorListScatter
+SoftsignGrad
+CopyHost
+LinSpace
+_ParallelConcatUpdate
+Stack
+StackPushV2
+AssignVariableOp
+SparseSplit
+TensorArrayUnpack
+TensorListStack
+BarrierIncompleteSize
+Restore
+TensorArrayV3
+ExperimentalAssertNextDataset
+InTopK
+ScatterSub
+Acosh
+DepthwiseConv2dNativeBackpropFilter
+Cast
+QuantizeV2
+GeneratorDataset
+TensorForestTreeSerialize
+NextAfter
+TensorArrayCloseV2
+BigQueryReader
+ReaderReadV2
+Mod
+AddV2
+StatelessRandomNormal
+StridedSliceAssign
+ScatterMin
+ResourceStridedSliceAssign
+RandomGammaGrad
+ResourceSparseApplyKerasMomentum
+BoostedTreesCreateQuantileStreamResource
+QuantizedRelu6
+SparseSparseMaximum
+BatchNormWithGlobalNormalization
+InTopKV2
+Cholesky
+ResourceApplyCenteredRMSProp
+ResourceApplyAdagrad
+ExperimentalParallelInterleaveDataset
+ResizeBicubicGrad
+BatchSelfAdjointEig
+SparseSoftmax
+Asinh
+QuantizedConv2DAndRelu
+MatrixInverse
+TensorListConcatLists
+Requantize
+FFT
+ConjugateTranspose
+Unstage
+Relu6Grad
+ScaleAndTranslateGrad
+_ArrayToList
+CudnnRNNV3
+ExpandDims
+InvGrad
+NonMaxSuppression
+L2Loss
+ResizeArea
+SparseCross
+BatchFFT3D
+RandomStandardNormal
+ResourceScatterMul
+SdcaOptimizer
+Zeta
+SampleDistortedBoundingBox
+IgammaGradA
+SegmentMax
+Range
+RetrieveTPUEmbeddingMomentumParametersGradAccumDebug
+FlushSummaryWriter
+Dequantize
+SparseFillEmptyRowsGrad
+IteratorGetNext
+SparseTensorDenseAdd
+PreventGradient
+LookupTableExport
diff --git a/src/ops/sequences.jl b/src/ops/sequences.jl
index 6633ab6b..003b3cfe 100644
--- a/src/ops/sequences.jl
+++ b/src/ops/sequences.jl
@@ -15,7 +15,7 @@ end
convert_eltype(x, dtype) = x
-@op function constant(value; dtype=nothing, kwargs...)
+@op function constant(value; dtype = nothing, kwargs...)
if dtype === nothing
if isa(value, AbstractString)
dtype = String
@@ -25,23 +25,27 @@ convert_eltype(x, dtype) = x
else
value = convert_eltype(value, dtype)
end
- Ops.const_(; value=value, dtype=dtype, kwargs...)
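+    # Eager mode wraps the value directly in an EagerTensor; graph mode
+    # builds a Const node as before.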
+ if in_eager_mode()
+ EagerTensor(value)
+ else
+ Ops.const_(; value = value, dtype = dtype, kwargs...)
+ end
end
for f in [:zeros, :ones]
@eval Base.$f(::Type{Tensor}, args::Integer...) = $f(Tensor{Float32}, args...)
- @eval Base.$f(::Type{Tensor}, args::NTuple{N, Integer}) where N = $f(Tensor, args...)
+ @eval Base.$f(::Type{Tensor}, args::NTuple{N,Integer}) where N = $f(Tensor, args...)
@eval Base.$f(::Type{Tensor{T}}, args::Integer...) where {T} = constant($f(T, args...))
- @eval Base.$f(::Type{Tensor{T}}, args::NTuple{N, Integer}) where {T, N} = constant($f(T, args))
+ @eval Base.$f(::Type{Tensor{T}}, args::NTuple{N,Integer}) where {T,N} = constant($f(T, args))
end
-@op function random_normal(shape; mean=0.0, stddev=1.0, dtype=Float32, name=nothing, kwargs...)
+@op function random_normal(shape; mean = 0.0, stddev = 1.0, dtype = Float32, name = nothing, kwargs...)
local out
with_op_name(name, "random_normal") do
mean = convert(Tensor{dtype}, mean)
stddev = convert(Tensor{dtype}, stddev)
- standard = Ops.random_standard_normal(shape; name=name, dtype=dtype, kwargs...)
- out = standard.*stddev + mean
+ standard = Ops.random_standard_normal(shape; name = name, dtype = dtype, kwargs...)
+ out = standard .* stddev + mean
end
out
end
@@ -63,7 +67,7 @@ Args:
Returns:
A `Tensor` of the specified `shape` and `dtype` containing random values.
"""
-@op function random_uniform(shape, minval, maxval; name=nothing, seed=0, dtype=Float32)
+@op function random_uniform(shape, minval, maxval; name = nothing, seed = 0, dtype = Float32)
local out
with_op_name(name, "RandomUniformScaled") do
seed1 = 0
@@ -71,8 +75,8 @@ A `Tensor` of the specified `shape` and `dtype` containing random values.
seed2 = seed
minval = convert(Tensor{dtype}, minval)
maxval = convert(Tensor{dtype}, maxval)
- r = random_uniform(shape; seed=seed1, seed2=seed2, dtype=dtype, name=name)
- out = r .* (maxval-minval) + minval
+ r = random_uniform(shape; seed = seed1, seed2 = seed2, dtype = dtype, name = name)
+ out = r .* (maxval - minval) + minval
end
out
end
@@ -82,14 +86,14 @@ end
Ops.random_shuffle(t; kwargs...)
end
-@op function Base.range(start::AbstractTensor; stop, num=Union{Integer, Nothin}, kwargs...)
+@op function Base.range(start::AbstractTensor; stop, num::Union{Integer,Nothing} = nothing, kwargs...)
Ops.lin_space(start, stop, num; kwargs...)
end
@op Base.range(start::AbstractTensor, length; kwargs...) = range(start, 1, length; kwargs...)
@op function Base.range(start::AbstractTensor, step, length; kwargs...)
- Ops.range(start, length+1, step; kwargs...)
+ Ops.range(start, length + 1, step; kwargs...)
end
@op function Base.fill(n::AbstractTensor, dims; kwargs...) #TODO: I think this is uncallable in 0.5
@@ -105,6 +109,10 @@ end
Ops.reverse_v2(x, indices; kwargs...)
end
-@op function Base.fill(n::AbstractTensor, dims::Tuple{Vararg{Int64, N}} where N; kwargs...)
- invoke(fill, Tuple{AbstractTensor, Any}, n, dims; kwargs...)
+@op function Base.reverse(x::AbstractTensor; dims=0, kwargs...)
+ reverse(x, [dims]; kwargs...)
+end
+
+@op function Base.fill(n::AbstractTensor, dims::Tuple{Vararg{Int64,N}} where N; kwargs...)
+ invoke(fill, Tuple{AbstractTensor,Any}, n, dims; kwargs...)
end
diff --git a/src/ops/summaries.jl b/src/ops/summaries.jl
index cf4c5509..3ca0a34b 100644
--- a/src/ops/summaries.jl
+++ b/src/ops/summaries.jl
@@ -5,9 +5,11 @@ scalar,
audio,
histogram,
merge_all,
-image
+image,
+@scalar
import TensorFlow
+using MacroTools
const tf = TensorFlow
for (jl_func, op) in [
@@ -16,10 +18,14 @@ for (jl_func, op) in [
(:histogram, :histogram_summary),
(:image, :image_summary)
]
- @eval @tf.op function $jl_func(args...; collections=[:Summaries], kwargs...)
+ @eval @tf.op function $jl_func(args...; collections=[:Summaries], step=0, kwargs...)
res = tf.Ops.$op(args...; kwargs...)
- foreach(c->tf.add_to_collection(c, res), collections)
- res
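+        # Eager mode writes the summary straight to the default FileWriter;
+        # graph mode registers the summary op in the given collections.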
+ if tf.in_eager_mode()
+ tf.summary.record_summary(tf.item(res), step=step)
+ else
+ foreach(c->tf.add_to_collection(c, res), collections)
+ return res
+ end
end
# Set the documentation of the summary function to the same as the
@@ -49,4 +55,10 @@ function merge_all(key=:Summaries)
merge(tensors)
end
+macro scalar(f, args...)
+ quote
+ scalar($(string(f)), $(esc(f)); $(esc.(args)...))
+ end
+end
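+# Usage sketch: `loss = 2.0; @scalar(loss, step=1)` expands to
+# `scalar("loss", loss; step=1)`, so the variable's name becomes the tag.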
+
end
diff --git a/src/ops/transformations.jl b/src/ops/transformations.jl
index 2d17705c..a1c5a846 100644
--- a/src/ops/transformations.jl
+++ b/src/ops/transformations.jl
@@ -381,13 +381,14 @@ Returns:
with_op_name(name, "Transpose") do
if perm === nothing
r = range(constant(0), LinearAlgebra.rank(n)-1)
- perm = reverse(r, [true])
+ perm = reverse(r)
end
result = Ops.transpose(n, perm)
end
result
end
+
@op function Base.permutedims(n::AbstractTensor, perm; name=nothing)
transpose(n, perm .- 1; name=name)
end
diff --git a/src/py.jl b/src/py.jl
index 707430bb..20152c54 100644
--- a/src/py.jl
+++ b/src/py.jl
@@ -11,14 +11,14 @@ function init()
py_tf_core[] = pyimport("tensorflow.core")
pywrap_tensorflow[] = pyimport("tensorflow.python.pywrap_tensorflow")
catch err
- error("The Python TensorFlow package could not be imported. You must install Python TensorFlow before using this package.")
+ error("The Python TensorFlow package could not be imported. You must install Python TensorFlow before using this package. The error was $err")
end
end
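+# Mirror Python's `with` statement: enter the context manager, run `f`, then
+# exit it.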
function py_with(f, ctx_mngr)
- ctx_mngr[:__enter__]()
+ ctx_mngr.__enter__()
f()
- ctx_mngr[:__exit__](nothing, nothing, nothing)
+ ctx_mngr.__exit__(nothing, nothing, nothing)
end
function py_bytes(b::DenseVector{UInt8})
@@ -41,31 +41,29 @@ macro py_catch(ex)
try
$(esc(ex))
catch err
- s = string("Python error: ", repr(err.val))
+ s = string("Python error: ", repr(err))
error(s)
end
end
end
function make_py_graph(graph_proto)
- py_graph = py_tf[][:Graph]()
- py_with(py_graph[:as_default]()) do
- # graph_def = py_tf[][:GraphDef]()
- graph_def = py_tf_core[][:protobuf][:meta_graph_pb2][:MetaGraphDef]()
- graph_def[:ParseFromString](graph_proto|>py_bytes)
- # @py_catch py_tf[][:import_graph_def](graph_def, name="")
- @py_catch py_tf[][:train][:import_meta_graph](graph_def)
+ py_graph = py_tf[].Graph()
+ py_with(py_graph.as_default()) do
+ graph_def = py_tf_core[].protobuf.meta_graph_pb2.MetaGraphDef()
+ graph_def.ParseFromString(graph_proto|>py_bytes)
+ @py_catch py_tf[].train.import_meta_graph(graph_def)
end
py_graph
end
function to_protos(py_graph)
- nodes = PyVector(py_graph[:node])
+ nodes = PyVector(py_graph.node)
n_nodes = length(nodes)
protos = []
for node_idx in 1:n_nodes
node_py = nodes[node_idx]
- proto = codeunits(node_py[:SerializeToString]())
+ proto = codeunits(node_py.SerializeToString())
push!(protos, proto)
end
return protos
@@ -74,15 +72,15 @@ end
function py_gradients(jl_graph_proto, x_names, y_names, grad_y_names)
py_graph = make_py_graph(jl_graph_proto)
- to_py_node(node_name) = py_graph[:get_tensor_by_name](string(node_name[1], ":", node_name[2]-1))
+ to_py_node(node_name) = py_graph.get_tensor_by_name(string(node_name[1], ":", node_name[2]-1))
to_py_node(node_names::AbstractVector) = tuple(to_py_node.(node_names)...) #Need tuple as Vector will not be accepted
to_py_node(::Cvoid) = nothing
py_x = to_py_node(x_names)
py_y = to_py_node(y_names)
py_grad_y = to_py_node(grad_y_names)
- @py_catch grad_node = py_tf[][:gradients](py_y, py_x, py_grad_y)
- py_graph_def = py_graph[:as_graph_def]()
+ @py_catch grad_node = py_tf[].gradients(py_y, py_x, py_grad_y)
+ py_graph_def = py_graph.as_graph_def()
grad_names = []
for node in grad_node
if node === nothing
@@ -90,9 +88,9 @@ function py_gradients(jl_graph_proto, x_names, y_names, grad_y_names)
continue
end
try
- push!(grad_names, (node[:values][:name], node[:indices][:name]))
+ push!(grad_names, (node.values.name, node.indices.name))
catch
- push!(grad_names, node[:name])
+ push!(grad_names, node.name)
end
end
return to_protos(py_graph_def), grad_names
diff --git a/src/show.jl b/src/show.jl
index 678f9531..f631df65 100644
--- a/src/show.jl
+++ b/src/show.jl
@@ -4,10 +4,10 @@ import Juno: Tree, Row, fade, interleave
import Printf
@render Juno.Inline t::Tensor begin
- s = get_shape(t)
- shape = s.rank_unknown ? [fade("unknown")] :
- interleave(map(dim -> ismissing(dim) ? "?" : dim , s.dims), fade("×"))
- Tree(Row(fade(try string(eltype(t)," ") catch e "" end),
+ s = get_shape(t)
+ shape = s.rank_unknown ? [fade("unknown")] :
+ interleave(map(dim->ismissing(dim) ? "?" : dim, s.dims), fade("×"))
+ Tree(Row(fade(try string(eltype(t), " ") catch e "" end),
HTML("Tensor "),
shape...),
[Text("name: $(node_name(t.op))"),
@@ -40,12 +40,18 @@ function Base.show(io::IO, t::RawTensor)
end
end
+function Base.show(io::IO, t::EagerTensor)
+ jl_array = convert(Array, t)
+ ptr = pointer_from_objref(t)
+ print(io, "EagerTensor<$ptr>($(jl_array))")
+end
+
function Base.show(io::IO, n::Operation)
print(io, "")
end
function Base.show(io::IO, t::Tensor{T}) where T
- @assert(T==eltype(t), "eltype = $(eltype(t)), but Tensor{$(T)})")
+    @assert(T == eltype(t), "eltype = $(eltype(t)), but Tensor{$(T)}")
s = get_shape(t)
if s.rank_unknown
@@ -165,7 +171,7 @@ function find_tensorboard()
return path
end
-function get_tensorboard(logdir=nothing)
+function get_tensorboard(logdir = nothing)
if isdefined(tensorboard, :x)
port = tensorboard[].port + 1
else
@@ -209,7 +215,7 @@ function visualize end
@with_def_graph function visualize(g::Graph)
tensorboard = get_tensorboard()
- writer = summary.FileWriter(tensorboard.logdir, graph=g)
+ writer = summary.FileWriter(tensorboard.logdir, graph = g)
visualize(writer)
close(writer)
end
diff --git a/src/summary_writer.jl b/src/summary_writer.jl
index 9b04e556..12f26475 100644
--- a/src/summary_writer.jl
+++ b/src/summary_writer.jl
@@ -1,11 +1,12 @@
using ProtoBuf
-import TensorFlow
-import Distributed
+using CRC32c
+import ..TensorFlow
const tf = TensorFlow
import ..TensorFlow: tensorflow, Graph, get_def_graph, @py_proc
+export FileWriter
-struct FileWriter
- pyo::Distributed.Future
+struct FileWriter <: tf.Context
+ file_handle
logdir::String
end
@@ -28,28 +29,44 @@ Arguments:
* logdir: A string. Directory where event file will be written.
* graph: A `Graph` object.
"""
-function FileWriter(log_dir::AbstractString; graph=get_def_graph())
+function FileWriter(log_dir::AbstractString; graph=nothing)
+ if !tf.in_eager_mode() && graph === nothing
+ graph = get_def_graph()
+ end
mkpath(log_dir)
- path = joinpath(log_dir, "events")
- pyo = @py_proc pywrap_tensorflow[][:EventsWriter](py_bytes($path))
- writer = FileWriter(pyo, String(log_dir))
+ local path
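+    # Find the first "events.out.tfevents.<i>" filename not already taken.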
+ for i in Iterators.countfrom(1)
+ path = joinpath(log_dir, "events.out.tfevents.$i")
+ ispath(path) || break
+ end
+ writer = FileWriter(open(path, "w"), String(log_dir))
if graph !== nothing
write(writer, graph)
end
return writer
end
+function masked_crc(data)
+ x = CRC32c.crc32c(data)
+ ((x>>15) | (x<<17)) + 0xa282ead8
+end
+
function Base.write(writer::FileWriter, event::tensorflow.Event)
b = IOBuffer()
writeproto(b, event)
seekstart(b)
proto = read(b)
- @py_proc begin
- py_event = py_tf[][:Event]()
- py_event[:ParseFromString](py_bytes($(proto)))
- $(writer.pyo)[:WriteEvent](py_event)
- $(writer.pyo)[:Flush]()
- end
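+    # TFRecord framing: the record length as a UInt64, the masked CRC of the
+    # length bytes, the serialized event proto, then the masked CRC of the proto.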
+ file = writer.file_handle
+ proto_length = UInt64(length(proto))
+ buffer = IOBuffer()
+ write(buffer, proto_length)
+ seekstart(buffer)
+ proto_length_bytes = read(buffer)
+ write(file, proto_length_bytes)
+ write(file, masked_crc(proto_length_bytes))
+ write(file, proto)
+ write(file, masked_crc(proto))
+ flush(file)
nothing
end
@@ -58,6 +75,12 @@ function Base.write(writer::FileWriter, summary::tensorflow.Summary, global_step
setproperty!(event, :step, Int(global_step))
setproperty!(event, :wall_time, time())
setproperty!(event, :summary, summary)
+    # A bug in ProtoBuf.jl leaves these fields unmarked as set,
+    # so we mark them manually.
+ fillset(event, :wall_time)
+ fillset(event, :step)
+ fillset(event, :summary)
+
write(writer, event)
end
@@ -76,7 +99,25 @@ function Base.write(writer::FileWriter, graph::Graph)
write(writer, event)
end
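+# FileWriter is now a Context, so the default writer lives on the global
+# context stack: `set_default` pushes one permanently, `with_default` scopes
+# one to a block, and `record_summary` looks it up when a summary is emitted.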
+function set_default(writer::FileWriter)
+ push!(tf.global_context, writer)
+end
+
+function with_default(writer::FileWriter, block)
+ tf.with_context(block, writer)
+end
+
+function get_default_file_writer()
+ return tf.context_value(FileWriter)
+end
+
+function record_summary(summary_pb; step=0)
+ writer = get_default_file_writer()
+ writer === nothing && return
+ write(writer, summary_pb, step)
+end
+
function Base.close(writer::FileWriter)
- @py_proc $(writer.pyo)[:Close]()
+ close(writer.file_handle)
nothing
end
diff --git a/src/tape.jl b/src/tape.jl
new file mode 100644
index 00000000..3afa4f64
--- /dev/null
+++ b/src/tape.jl
@@ -0,0 +1,182 @@
+using MacroTools
+import MacroTools: splitdef, combinedef
+
+struct TapeNode
+ op::Function
+ args::Vector
+ results::Vector
+ kwargs::Dict
+end
+
+TapeNode(op, args, results; kwargs...) = TapeNode(op, args, results, Dict(kwargs))
+
+mutable struct Tape
+ nodes::Dict{EagerTensor, TapeNode}
+ attrs::Dict
+end
+
+Tape(;kwargs...) = Tape(Dict{EagerTensor, TapeNode}(), Dict(kwargs...))
+
+struct TapeContext <: Context
+ tape::Union{Tape, Nothing}
+end
+
+create_tape() = set_tape(Tape())
+
+function set_tape(new_tape)
+ push!(global_context, TapeContext(new_tape))
+ return new_tape
+end
+
+function with_tape(block, tape=Tape())
+ ctx = TapeContext(tape)
+ with_context(block, ctx)
+end
+
+function get_tape()
+ tape_context = context_value(TapeContext)
+ if tape_context === nothing
+ return nothing
+ else
+ return tape_context.tape
+ end
+end
+
+function add_node(t, node)
+ tape = get_tape()
+ tape === nothing && return
+ tape.nodes[t] = node
+end
+
+function backwards
+end
+
+macro back_for(target, fn)
+ def = splitdef(fn)
+ if def[:name] == :f
+ def[:name] = Symbol(string(target, "_", "backwards"))
+ end
+ backwards_expr = :(backwards(::typeof($target)) = $(def[:name]))
+ quote
+ $(esc(combinedef(def)))
+ $(esc(backwards_expr))
+ end
+end
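+
+# `@back_for(op, f)` registers `f` as the gradient of `op`: it gives the
+# function a stable name and wires it up via a `backwards(::typeof(op))` method.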
+
+@back_for(Ops.add, function f(grad, x, y; kwargs...)
+ println("Add got $grad, $x, $y")
+ return [constant(1.0).*grad, constant(1.0).*grad]
+end)
+
+@back_for(Ops.sub, function f(grad, x, y; kwargs...)
+ return [constant(1.0).*grad, constant(-1.0).*grad]
+end)
+
+@back_for(Ops.neg, function f(grad, x; kwargs...)
+ return constant(-1.0) .* grad
+end)
+
+@back_for(Ops.pow, function f(grad, x, y; kwargs...)
+    [grad .* y .* x.^(y .- 1), nothing]
+end)
+
+@back_for(Ops.exp, function f(grad, x; kwargs...)
+ Ops.exp(x) .* grad
+end)
+
+@back_for(Ops.mean, function f(grad, x, reduction_indices; keep_dims=nothing, kwargs...)
+ # assume reduction_indices is everything for now
+ n_elem = float(num_elements(x))
+ [grad .* Ops.fill(size(x), 1/constant(n_elem)), nothing]
+end)
+
+@back_for(Ops.sum, function f(grad, x, reduction_indices; keep_dims=nothing, kwargs...)
+ # assume reduction_indices is everything for now
+ [grad .* Ops.fill(size(x), constant(1.0)), nothing]
+end)
+
+@back_for(Ops.mul, function f(grad, x, y; kwargs...)
+ return [grad.*y, grad.*x]
+end)
+
+@back_for(Ops.cast, function f(grad, x; kwargs...)
+ return grad
+end)
+
+@back_for(Ops.log, function f(grad, x; kwargs...)
+ return 1/x .* grad
+end)
+
+@back_for(Ops.sin, function f(grad, x; kwargs...)
+ return cos(x) .* grad
+end)
+
+@back_for(Ops.cos, function f(grad, x; kwargs...)
+    return constant(-1.0) .* sin(x) .* grad
+end)
+
+@back_for(Ops.relu, function f(grad, x; kwargs...)
+ Ops.relu_grad(grad, x)
+end)
+
+@back_for(Ops.mat_mul, function f(grad, x, y; transpose_a=nothing, transpose_b=nothing, kwargs...)
+    # TODO: pay attention to the transpose arguments
+ grad_x = Ops.mat_mul(grad, y, transpose_b=true)
+ grad_y = Ops.mat_mul(x, grad, transpose_a=true)
+ return [grad_x, grad_y]
+end)
+
+@back_for(Ops.tanh, function f(grad, x; output=nothing, kwargs...)
+ Ops.tanh_grad(output[1], grad)
+end)
+
+@back_for(Ops.sigmoid, function f(grad, x; output=nothing, kwargs...)
+ Ops.sigmoid_grad(output[1], grad)
+end)
+
+@back_for(Ops.sqrt, function f(grad, x; output=nothing, kwargs...)
+ Ops.sqrt_grad(output[1], grad)
+end)
+
+@back_for(Ops.bias_add, function f(grad, x, y; kwargs...)
+ [grad, Ops.bias_add_grad(grad)]
+end)
+
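+# Run `f` with gradient recording disabled: pushing a TapeContext that holds
+# `nothing` makes `add_node` (and hence tape recording) a no-op in the block.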
+function with_no_grad(f)
+ res = with_context(f, TapeContext(nothing))
+ return res
+end
+
+ensure_vector(x::AbstractArray) = x
+ensure_vector(x) = [x]
+
+function _grad(tape::Tape, tensor, out_grad, grads)
+ if !haskey(tape.nodes, tensor)
+ return
+ end
+
+ node = tape.nodes[tensor]
+ back_op = backwards(node.op)
+ arg_grads = with_no_grad() do
+ back_op(out_grad, node.args...; output=node.results, node.kwargs...)
+ end
+ arg_grads = ensure_vector(arg_grads)
+ for (i, arg) in enumerate(node.args)
+ arg_grads[i] === nothing && continue
+ grads[arg] = arg_grads[i]
+ _grad(tape, arg, grads[arg], grads)
+ end
+
+ return
+end
+
+function grad(tape, tensor, in_tensors::AbstractArray, out_grad=constant(1.0))
+ grads = Dict()
+ _grad(tape, tensor, out_grad, grads)
+ get(tape.attrs, "preserve", false) || empty!(tape.nodes)
+ return [get(grads, tensor, nothing) for tensor in in_tensors]
+end
+
+function grad(tape, tensor, in_tensor, out_grad=constant(1.0))
+ grad(tape, tensor, [in_tensor], out_grad)[1]
+end
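+
+# Usage sketch (assuming eager execution is on and the generated ops record
+# onto the active tape):
+#     tape = create_tape()
+#     x = constant(3.0)
+#     y = Ops.mul(x, x)
+#     grad(tape, y, x)    # d(x*x)/dx = 2x, here 6.0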
diff --git a/src/train.jl b/src/train.jl
index 0514ce99..f760f313 100644
--- a/src/train.jl
+++ b/src/train.jl
@@ -64,7 +64,7 @@ mutable struct GradientDescentOptimizer <: Optimizer
name::String
end
-GradientDescentOptimizer(learning_rate; name="descent") = GradientDescentOptimizer(Tensor(learning_rate), name)
+GradientDescentOptimizer(learning_rate; name="descent") = GradientDescentOptimizer(constant(learning_rate), name)
function GradientDescentOptimizer(; α=.01, kwargs...)
GradientDescentOptimizer(α; kwargs...)
diff --git a/src/version.jl b/src/version.jl
index 71d98370..30d4e41e 100644
--- a/src/version.jl
+++ b/src/version.jl
@@ -17,7 +17,7 @@ function tf_version(; kind=:backend)
if kind == :backend
res = @tfcall(:TF_Version, Cstring, ()) |> unsafe_string
elseif kind == :python
- res = fetch(@py_proc py_tf[][:VERSION])
+ res = fetch(@py_proc py_tf[].VERSION)
elseif kind == :julia
return Pkg.installed()["TensorFlow"]
else
diff --git a/test/runtests.jl b/test/runtests.jl
index c505e3f6..6a3ad45f 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -31,9 +31,9 @@ tests = [
"train.jl",
"training.jl",
"transformations.jl",
+"summary_eager.jl"
]
-
tf_versioninfo() # Dump out all the info at start of the test, for easy debugging from logs. (also check `tf_versioninfo()` itself works)
for filename in tests
@@ -42,3 +42,7 @@ for filename in tests
include(filename)
end
end
+
+# TODO configure tests so they automatically set the appropriate graph or eager mode for themselves. For now,
+# all the eager tests run at the end.
+include(joinpath(dirname(@__FILE__), "..", "examples", "keras.jl"))
diff --git a/test/summary_eager.jl b/test/summary_eager.jl
new file mode 100644
index 00000000..3eac3433
--- /dev/null
+++ b/test/summary_eager.jl
@@ -0,0 +1,16 @@
+using TensorFlow
+tf = TensorFlow
+tf.enable_eager_execution()
+summary = tf.summary
+mktempdir() do tmpdir
+ writer = summary.FileWriter(tmpdir)
+ summary.set_default(writer)
+ tag="y"
+ summary.scalar(tag, 3.2, step=0)
+ summary.scalar(tag, 5.0, step=1)
+ summary.scalar(tag, -2.5, step=2)
+
+ # Test convenience macros
+ loss=2.0
+ @tf.summary.scalar(loss, step=1)
+end