diff --git a/CHANGELOG.md b/CHANGELOG.md
index c3a4148c833..cea1e5dd1d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,7 +3,7 @@
* Refactor and improve presets for PyTorch ([pull #1360](https://github.com/bytedeco/javacpp-presets/pull/1360))
* Include `mkl_lapack.h` header file in presets for MKL ([issue #1388](https://github.com/bytedeco/javacpp-presets/issues/1388))
* Map new higher-level C++ API of Triton Inference Server ([pull #1361](https://github.com/bytedeco/javacpp-presets/pull/1361))
- * Upgrade presets for OpenCV 4.8.1, DNNL 3.3, OpenBLAS 0.3.24, CPython 3.12.0, NumPy 1.26.1, SciPy 1.11.3, LLVM 17.0.1, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, TensorFlow Lite 2.14.0, Triton Inference Server 2.38.0, ONNX 1.14.1, ONNX Runtime 1.16.1, TVM 0.13.0, and their dependencies
+ * Upgrade presets for OpenCV 4.8.1, DNNL 3.3, OpenBLAS 0.3.24, CPython 3.12.0, NumPy 1.26.1, SciPy 1.11.3, LLVM 17.0.1, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, PyTorch 2.1.0, TensorFlow Lite 2.14.0, Triton Inference Server 2.38.0, ONNX 1.14.1, ONNX Runtime 1.16.1, TVM 0.13.0, and their dependencies
### June 6, 2023 version 1.5.9
* Virtualize `nvinfer1::IGpuAllocator` from TensorRT to allow customization ([pull #1367](https://github.com/bytedeco/javacpp-presets/pull/1367))
diff --git a/README.md b/README.md
index effa1d36f14..6664501caed 100644
--- a/README.md
+++ b/README.md
@@ -134,7 +134,7 @@ Further, in the case of Android, the JavaCPP Presets also rely on:
Manual Installation
-------------------
-Simply put all the desired JAR files (`opencv*.jar`, `ffmpeg*.jar`, etc.), in addition to `javacpp.jar`, somewhere in your class path. The JAR files available as pre-built artifacts are meant to be used with [JavaCPP](https://github.com/bytedeco/javacpp). The binaries for Linux were built for CentOS 6 and 7, so they should work on most distributions currently in use. The ones for Android were compiled for ARMv7 processors featuring an FPU, so they will not work on ancient devices such as the HTC Magic or some others with an ARMv6 CPU. Here are some more specific instructions for common cases:
+Simply put all the desired JAR files (`opencv*.jar`, `ffmpeg*.jar`, etc.), in addition to `javacpp.jar`, somewhere in your class path. The JAR files available as pre-built artifacts are meant to be used with [JavaCPP](https://github.com/bytedeco/javacpp). The binaries for Linux are built with Ubuntu, so they should work on most distributions currently in use. The ones for Android were compiled for ARMv7 processors featuring an FPU, so they will not work on ancient devices such as the HTC Magic or some others with an ARMv6 CPU. Here are some more specific instructions for common cases:
NetBeans (Java SE 7 or newer):
@@ -222,7 +222,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip
* NVIDIA Video Codec SDK 12.1.x https://developer.nvidia.com/nvidia-video-codec-sdk
* OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader
* MXNet 1.9.x https://github.com/apache/incubator-mxnet
- * PyTorch 2.0.x https://github.com/pytorch/pytorch
+ * PyTorch 2.1.x https://github.com/pytorch/pytorch
* SentencePiece 0.1.99 https://github.com/google/sentencepiece
* TensorFlow 1.15.x https://github.com/tensorflow/tensorflow
* TensorFlow Lite 2.14.x https://github.com/tensorflow/tensorflow
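As a quick sanity check of the upgraded artifacts described above, a program like the following should run once javacpp.jar and the pytorch jars (or pytorch-platform with its dependencies) are on the class path. This is a minimal sketch: the class name is made up, and the factory calls are ordinary org.bytedeco.pytorch.global.torch functions rather than anything introduced by this diff.

    // HelloTorch.java -- hypothetical smoke test; assumes the PyTorch preset
    // jars and their dependencies are on the class path.
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class HelloTorch {
        public static void main(String[] args) {
            Tensor a = ones(2, 3);   // tensor factories are static methods of global.torch
            Tensor b = rand(2, 3);
            a.add(b).print();        // Tensor.print() dumps the contents to stdout
        }
    }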
diff --git a/platform/pom.xml b/platform/pom.xml
index 976e8feba6a..2f06d345982 100644
--- a/platform/pom.xml
+++ b/platform/pom.xml
@@ -292,7 +292,7 @@
       <dependency>
         <groupId>org.bytedeco</groupId>
         <artifactId>pytorch-platform</artifactId>
-        <version>2.0.1-${project.version}</version>
+        <version>2.1.0-${project.version}</version>
       </dependency>
       <dependency>
         <groupId>org.bytedeco</groupId>
diff --git a/pytorch/README.md b/pytorch/README.md
index 352be0a24a1..ff19dd6eb71 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -9,7 +9,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:
- * PyTorch 2.0.1 https://pytorch.org/
+ * PyTorch 2.1.0 https://pytorch.org/
Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
@@ -48,14 +48,14 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.0.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.0-1.5.10-SNAPSHOT</version>
     </dependency>

     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform-gpu</artifactId>
-      <version>2.0.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.0-1.5.10-SNAPSHOT</version>
     </dependency>
@@ -109,7 +109,7 @@ public class SimpleMNIST {
}
// Use one of many "standard library" modules.
- LinearImpl fc1 = null, fc2 = null, fc3 = null;
+ final LinearImpl fc1, fc2, fc3;
}
public static void main(String[] args) throws Exception {
diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh
index aebc073401d..8e9d089548b 100755
--- a/pytorch/cppbuild.sh
+++ b/pytorch/cppbuild.sh
@@ -27,7 +27,7 @@ if [[ "$EXTENSION" == *gpu ]]; then
export USE_CUDNN=1
export USE_FAST_NVCC=0
export CUDA_SEPARABLE_COMPILATION=OFF
- export TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0+PTX"
+ export TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;8.0;9.0"
fi
export PYTHON_BIN_PATH=$(which python3)
@@ -35,7 +35,7 @@ if [[ $PLATFORM == windows* ]]; then
export PYTHON_BIN_PATH=$(which python.exe)
fi
-PYTORCH_VERSION=2.0.1
+PYTORCH_VERSION=2.1.0
mkdir -p "$PLATFORM$EXTENSION"
cd "$PLATFORM$EXTENSION"
diff --git a/pytorch/include_list.pl b/pytorch/include_list.pl
new file mode 100644
index 00000000000..1435c82f3ec
--- /dev/null
+++ b/pytorch/include_list.pl
@@ -0,0 +1,65 @@
+#!/usr/bin/env perl
+
+# Must be run from javacpp-presets/pytorch after cppbuild.sh has been run
+# for linux-x86_64-gpu
+
+# Generate the lists of includes to parse, in order, from the output
+# of g++ -H
+# Used to update src/main/resources/org/bytedeco/pytorch/presets/*
+
+use strict;
+use warnings;
+
+my %incs;
+my @inc_per_depth;
+
+sub flush($) {
+ my $min_depth = shift;
+ for (my $d = @inc_per_depth - 1; $d >= $min_depth; $d--) {
+ if ($inc_per_depth[$d]) {
+ foreach my $i (@{$inc_per_depth[$d]}) {
+ print "#include \"$i\"\n";
+ $incs{$i} = 1;
+ }
+ undef $inc_per_depth[$d];
+ }
+ }
+}
+
+sub go {
+ my $path = join ' ', @_;
+
+ my @inc = `g++ -I. -I torch/csrc/api/include/ -H $path -E 2>&1 > /dev/null`;
+ foreach my $i (@inc) {
+ chomp $i;
+ my ($depth, $f) = $i =~ /^(\.+)\s(.*\.h)$/;
+ next unless $depth;
+ $depth = length($depth);
+ $f =~ s#^\./##;
+ next if $f =~ m#^/
+ |^ATen/ops/\w+_native\.h$
+ |^ATen/ops/\w+_meta\.h$
+ |^ATen/ops/\w+_ops\.h$
+ |^ATen/ops/_\w+\.h$#x
+ or $incs{$f};
+ flush($depth);
+ my $incs = $inc_per_depth[$depth];
+ $incs = $inc_per_depth[$depth] = [] unless $incs;
+ push @$incs, $f;
+ }
+ flush(0);
+}
+
+chdir "cppbuild/linux-x86_64-gpu/pytorch/torch/include";
+
+go('torch/csrc/api/include/torch/torch.h', 'torch/script.h');
+
+print <<EOF;
diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml
--- a/pytorch/platform/gpu/pom.xml
+++ b/pytorch/platform/gpu/pom.xml
@@ -12,7 +12,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch-platform-gpu</artifactId>
-  <version>2.0.1-${project.parent.version}</version>
+  <version>2.1.0-${project.parent.version}</version>
   <name>JavaCPP Presets Platform GPU for PyTorch</name>
diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml
index 8df62d99c80..4755c9c317e 100644
--- a/pytorch/platform/pom.xml
+++ b/pytorch/platform/pom.xml
@@ -12,7 +12,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch-platform</artifactId>
-  <version>2.0.1-${project.parent.version}</version>
+  <version>2.1.0-${project.parent.version}</version>
   <name>JavaCPP Presets Platform for PyTorch</name>
diff --git a/pytorch/pom.xml b/pytorch/pom.xml
index 4acbcd7fc5d..14da0bf4f81 100644
--- a/pytorch/pom.xml
+++ b/pytorch/pom.xml
@@ -11,7 +11,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch</artifactId>
-  <version>2.0.1-${project.parent.version}</version>
+  <version>2.1.0-${project.parent.version}</version>
   <name>JavaCPP Presets for PyTorch</name>
diff --git a/pytorch/samples/SimpleMNIST.java b/pytorch/samples/SimpleMNIST.java
index 2b2419024e3..d1a3fa392f1 100644
--- a/pytorch/samples/SimpleMNIST.java
+++ b/pytorch/samples/SimpleMNIST.java
@@ -20,14 +20,14 @@ static class Net extends Module {
Tensor forward(Tensor x) {
// Use one of many tensor manipulation functions.
x = relu(fc1.forward(x.reshape(x.size(0), 784)));
- x = dropout(x, /*p=*/0.5, /*train=*/is_training(), false);
+ x = dropout(x, /*p=*/0.5, /*train=*/is_training());
x = relu(fc2.forward(x));
- x = log_softmax(fc3.forward(x), new LogSoftmaxFuncOptions(/*dim=*/1));
+ x = log_softmax(fc3.forward(x), /*dim=*/1);
return x;
}
// Use one of many "standard library" modules.
- LinearImpl fc1 = null, fc2 = null, fc3 = null;
+ final LinearImpl fc1, fc2, fc3;
}
public static void main(String[] args) throws Exception {
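Pieced together, the hunks above leave the sample's Net module looking like this. The constructor body is quoted from the unchanged part of SimpleMNIST.java, so treat this as a sketch of the post-upgrade state rather than a further change:

    static class Net extends Module {
        Net() {
            // Construct and register three Linear submodules.
            fc1 = register_module("fc1", new LinearImpl(784, 64));
            fc2 = register_module("fc2", new LinearImpl(64, 32));
            fc3 = register_module("fc3", new LinearImpl(32, 10));
        }

        Tensor forward(Tensor x) {
            // Use one of many tensor manipulation functions.
            x = relu(fc1.forward(x.reshape(x.size(0), 784)));
            x = dropout(x, /*p=*/0.5, /*train=*/is_training()); // trailing inplace flag dropped
            x = relu(fc2.forward(x));
            x = log_softmax(fc3.forward(x), /*dim=*/1);         // plain dim instead of LogSoftmaxFuncOptions
            return x;
        }

        // Use one of many "standard library" modules.
        final LinearImpl fc1, fc2, fc3;
    }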
diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml
index ddab7cd4c0a..c4306cfab81 100644
--- a/pytorch/samples/pom.xml
+++ b/pytorch/samples/pom.xml
@@ -12,14 +12,14 @@
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.0.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.0-1.5.10-SNAPSHOT</version>
     </dependency>

     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform-gpu</artifactId>
-      <version>2.0.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.0-1.5.10-SNAPSHOT</version>
     </dependency>
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java
deleted file mode 100644
index d85fe88b885..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java
+++ /dev/null
@@ -1,26 +0,0 @@
-// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Namespace("torch::profiler::impl::kineto") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ActivityTraceWrapper extends Pointer {
- /** Empty constructor. Calls {@code super((Pointer)null)}. */
- public ActivityTraceWrapper() { super((Pointer)null); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public ActivityTraceWrapper(Pointer p) { super(p); }
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java
index eb44f918baf..bab8dd1b2e2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java
@@ -52,7 +52,7 @@ public class Allocator extends Pointer {
// is guaranteed to return a unique_ptr with this deleter attached;
// it means the rawAllocate and rawDeallocate APIs are safe to use.
// This function MUST always return the same BoundDeleter.
- public native @Cast("c10::DeleterFnPtr") PointerConsumer raw_deleter();
+ public native PointerConsumer raw_deleter();
public native Pointer raw_allocate(@Cast("size_t") long n);
public native void raw_deallocate(Pointer ptr);
}
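The raw_* methods kept here form the C-style allocation path that the comment above alludes to. A hedged sketch of the intended pairing, where alloc stands for any Allocator instance obtained from the library:

    Pointer p = alloc.raw_allocate(1024); // 1 KiB block, owned by the caller
    // ... use p ...
    alloc.raw_deallocate(p);              // must go back through the same allocator,
                                          // i.e. the one whose raw_deleter() matches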
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java
index 6da3c43040e..acb0b54f7bc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java
@@ -227,6 +227,8 @@ public class AnyModule extends Pointer {
private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module);
public AnyModule(ConstantPad1dImpl module) { super((Pointer)null); allocate(module); }
private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module);
+ public AnyModule(ZeroPad1dImpl module) { super((Pointer)null); allocate(module); }
+ private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad1dImpl module);
public AnyModule(AvgPool1dImpl module) { super((Pointer)null); allocate(module); }
private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module);
public AnyModule(MaxPool1dImpl module) { super((Pointer)null); allocate(module); }
@@ -267,6 +269,8 @@ public class AnyModule extends Pointer {
private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module);
public AnyModule(ConstantPad3dImpl module) { super((Pointer)null); allocate(module); }
private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module);
+ public AnyModule(ZeroPad3dImpl module) { super((Pointer)null); allocate(module); }
+ private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad3dImpl module);
public AnyModule(AvgPool3dImpl module) { super((Pointer)null); allocate(module); }
private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module);
public AnyModule(MaxPool3dImpl module) { super((Pointer)null); allocate(module); }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java
index f5e011b39e6..b7233c25fc5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java
@@ -38,18 +38,10 @@ public class ArgumentDef extends Pointer {
return new ArgumentDef((Pointer)this).offsetAddress(i);
}
- public static class GetTypeFn extends FunctionPointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public GetTypeFn(Pointer p) { super(p); }
- protected GetTypeFn() { allocate(); }
- private native void allocate();
- public native @ByVal Type.TypePtr call();
- }
- public native GetTypeFn getTypeFn(); public native ArgumentDef getTypeFn(GetTypeFn setter);
- public native GetTypeFn getFakeTypeFn(); public native ArgumentDef getFakeTypeFn(GetTypeFn setter);
+ public native TypeSupplier getTypeFn(); public native ArgumentDef getTypeFn(TypeSupplier setter);
+ public native TypeSupplier getFakeTypeFn(); public native ArgumentDef getFakeTypeFn(TypeSupplier setter);
public ArgumentDef() { super((Pointer)null); allocate(); }
private native void allocate();
- public ArgumentDef(GetTypeFn getTypeFn, GetTypeFn getFakeTypeFn) { super((Pointer)null); allocate(getTypeFn, getFakeTypeFn); }
- private native void allocate(GetTypeFn getTypeFn, GetTypeFn getFakeTypeFn);
+ public ArgumentDef(TypeSupplier getTypeFn, TypeSupplier getFakeTypeFn) { super((Pointer)null); allocate(getTypeFn, getFakeTypeFn); }
+ private native void allocate(TypeSupplier getTypeFn, TypeSupplier getFakeTypeFn);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
index 73c4c4f626e..a68a8d19f87 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
@@ -71,6 +71,12 @@ public class AutogradMeta extends AutogradMetaInterface {
+ // The post_acc_grad_hooks_ field stores only Python hooks
+ // (PyFunctionTensorPostAccGradHooks) that are called after the
+ // .grad field has been accumulated into. This is less complicated
+ // than the hooks_ field, which encapsulates a lot more.
+  public native @UniquePtr @Cast({"", "", "std::unique_ptr<torch::autograd::PostAccumulateGradHook>&&"}) PostAccumulateGradHook post_acc_grad_hooks_(); public native AutogradMeta post_acc_grad_hooks_(PostAccumulateGradHook setter);
+
// Only meaningful on leaf variables (must be false otherwise)
public native @Cast("bool") boolean requires_grad_(); public native AutogradMeta requires_grad_(boolean setter);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java
index 3fd2ecff942..d9fe4f36999 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java
@@ -46,7 +46,7 @@ private native void allocate(
public native void set_inference_mode(@Cast("bool") boolean enabled);
- public native void set_multithreading_enabled(@Cast("bool") boolean mulithreading_enabled);
+ public native void set_multithreading_enabled(@Cast("bool") boolean multithreading_enabled);
public native void set_view_replay_enabled(@Cast("bool") boolean view_replay_enabled);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java
new file mode 100644
index 00000000000..708b845b5e5
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java
@@ -0,0 +1,49 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// For ease of copy pasting
+// #if 0
+// #endif
+
+/**
+ * This structure is intended to hold additional metadata of the specific device
+ * backend.
+ **/
+@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BackendMeta extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public BackendMeta() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public BackendMeta(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public BackendMeta(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public BackendMeta position(long position) {
+ return (BackendMeta)super.position(position);
+ }
+ @Override public BackendMeta getPointer(long i) {
+ return new BackendMeta((Pointer)this).offsetAddress(i);
+ }
+
+ public native @ByVal BackendMetaRef clone(
+ @Const @ByRef BackendMetaRef ptr);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java
new file mode 100644
index 00000000000..8cf29659d24
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java
@@ -0,0 +1,150 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BackendMetaRef extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public BackendMetaRef(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public BackendMetaRef(long size) { super((Pointer)null); allocateArray(size); }
+ private native void allocateArray(long size);
+ @Override public BackendMetaRef position(long position) {
+ return (BackendMetaRef)super.position(position);
+ }
+ @Override public BackendMetaRef getPointer(long i) {
+ return new BackendMetaRef((Pointer)this).offsetAddress(i);
+ }
+
+
+ public BackendMetaRef() { super((Pointer)null); allocate(); }
+ @NoException(true) private native void allocate();
+
+ public BackendMetaRef(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
+ @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
+
+ // This constructor will not increase the ref counter for you.
+ // We use the tagged dispatch mechanism to explicitly mark this constructor
+ // to not increase the refcount
+ public BackendMetaRef(BackendMeta target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); }
+ @NoException(true) private native void allocate(BackendMeta target, @ByVal DontIncreaseRefcount arg1);
+
+
+
+ public BackendMetaRef(@ByRef(true) BackendMetaRef rhs) { super((Pointer)null); allocate(rhs); }
+ @NoException(true) private native void allocate(@ByRef(true) BackendMetaRef rhs);
+
+ public native @ByRef @Name("operator =") @NoException(true) BackendMetaRef put(@ByRef(true) BackendMetaRef rhs);
+
+ public native @NoException(true) BackendMeta get();
+
+ public native @ByRef @Name("operator *") @NoException(true) BackendMeta multiply();
+
+ public native @Name("operator ->") @NoException(true) BackendMeta access();
+
+ public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
+
+ public native @NoException(true) void reset();
+
+ public native @NoException(true) void swap(@ByRef BackendMetaRef rhs);
+
+ // We do a lot of null-pointer checks in our code, good to have this be cheap.
+ public native @Cast("bool") @NoException(true) boolean defined();
+
+ public native @Cast("size_t") @NoException(true) long use_count();
+
+ public native @Cast("size_t") @NoException(true) long weak_use_count();
+
+ public native @Cast("bool") @NoException(true) boolean unique();
+
+ /**
+ * Returns an owning (!) pointer to the underlying object and makes the
+ * intrusive_ptr instance invalid. That means the refcount is not decreased.
+ * You *must* put the returned pointer back into a intrusive_ptr using
+ * intrusive_ptr::reclaim(ptr) to properly destruct it.
+ * This is helpful for C APIs.
+ */
+ public native @NoException(true) BackendMeta release();
+
+ /**
+ * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
+ * over ownership. That means the refcount is not increased.
+ * This is the counter-part to intrusive_ptr::release() and the pointer
+ * passed in *must* have been created using intrusive_ptr::release().
+ */
+ public static native @ByVal BackendMetaRef reclaim(BackendMeta owning_ptr);
+
+ /**
+ * Takes an owning pointer to TTarget* and creates an intrusive_ptr
+ * representing a new reference, i.e. the raw pointer retains
+ * ownership.
+ */
+ public static native @ByVal BackendMetaRef reclaim_copy(BackendMeta owning_ptr);
+
+ /**
+ * Allocate a heap object with args and wrap it inside a intrusive_ptr and
+ * incref. This is a helper function to let make_intrusive() access private
+ * intrusive_ptr constructors.
+ */
+
+ /**
+ * Turn a new instance of TTarget (e.g., literally allocated
+ * using new TTarget(...) into an intrusive_ptr. If possible,
+ * use intrusive_ptr::make instead which statically guarantees
+ * that the allocation was done properly.
+ *
+ * At the moment, the only reason this method exists is because
+ * pybind11 holder types expect to be able to allocate in
+ * this way (because pybind11 handles the new allocation itself).
+ */
+ public static native @ByVal BackendMetaRef unsafe_steal_from_new(BackendMeta raw_ptr);
+
+ /**
+ * Turn an instance of TTarget that should not be reference counted
+ * (e.g., allocated into an arena with placement new) into an
+ * intrusive_ptr. This is gratuitously unsafe and should only be
+ * used if you can guarantee that the pointer will not escape and be
+ * refcounted as normal.
+ *
+ * {@code expected_decrefs} is a debugging parameter: it indicates the
+ * number of strong owners the intrusive_ptr_target in question is
+ * expected to get. In most use cases, this will likely be 1.
+ *
+ * The reason this method exists is for manually sharing
+ * StorageImpls across Tensors in the static runtime. It needs
+ * access to private intrusive_ptr members so that the refcounts can
+ * be initialized to custom values.
+ */
+ public static native @ByVal BackendMetaRef unsafe_adapt_non_heap_allocated(
+ BackendMeta raw_ptr,
+ @Cast("size_t") long expected_decrefs);
+
+ /**
+ * Turn a **non-owning raw pointer** to an intrusive_ptr. It is
+ * the moral equivalent of enable_shared_from_this on a shared pointer.
+ *
+ * This method is only valid for objects that are already live. If
+ * you are looking for the moral equivalent of unique_ptr(T*)
+ * constructor, see steal_from_new.
+ *
+ * TODO: https://github.com/pytorch/pytorch/issues/56482
+ */
+ public static native @ByVal BackendMetaRef unsafe_reclaim_from_nonowning(BackendMeta raw_ptr);
+}
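The release()/reclaim() pair documented above is easy to get wrong, so here is a hedged sketch of the round trip the comments require; nothing here is new API, just the methods listed in this file:

    BackendMetaRef ref = new BackendMetaRef();          // starts out as a null intrusive_ptr
    BackendMeta raw = ref.release();                    // ref gives up ownership, refcount untouched
    // ... pass `raw` through a C-style API ...
    BackendMetaRef back = BackendMetaRef.reclaim(raw);  // mandatory: reclaim so it destructs properly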
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java
index 688b9c76ecb..da86aa83043 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java
@@ -69,7 +69,7 @@ public class Blob extends Pointer {
* \brief Gets the const reference of the stored object. The code checks if
* the stored object is of the desired type.
*/
- // TODO(jerryzh): add a Get(DeviceType) function?
+ // TODO(jerryzh): add a Get(c10::DeviceType) function?
public native @NoException(true) Pointer GetRaw();
/**
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java
index 1320b1b5311..db2effd1d10 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java
@@ -32,13 +32,5 @@ public class BlockWrap extends Pointer {
private native void allocate(Block p);
public native void clear();
public native Block elem(); public native BlockWrap elem(Block setter);
- public static class Clear_cb_Pointer extends FunctionPointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public Clear_cb_Pointer(Pointer p) { super(p); }
- protected Clear_cb_Pointer() { allocate(); }
- private native void allocate();
- public native void call(Pointer arg0);
- }
- public native Clear_cb_Pointer clear_cb(); public native BlockWrap clear_cb(Clear_cb_Pointer setter);
+ public native PointerConsumer clear_cb(); public native BlockWrap clear_cb(PointerConsumer setter);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java
deleted file mode 100644
index 1cd991dd051..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Bool2Vector extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public Bool2Vector(Pointer p) { super(p); }
- public Bool2Vector(BoolPointer value) { this(1); put(0, value); }
- public Bool2Vector(BoolPointer ... array) { this(array.length); put(array); }
- public Bool2Vector() { allocate(); }
- public Bool2Vector(long n) { allocate(n); }
- private native void allocate();
- private native void allocate(@Cast("size_t") long n);
- public native @Name("operator =") @ByRef Bool2Vector put(@ByRef Bool2Vector x);
-
- public boolean empty() { return size() == 0; }
- public native long size();
- public void clear() { resize(0); }
- public native void resize(@Cast("size_t") long n);
-
- public BoolPointer front() { return get(0); }
- public BoolPointer back() { return get(size() - 1); }
- @Index(function = "at") public native @Cast("std::array*") @ByRef BoolPointer get(@Cast("size_t") long i);
- public native Bool2Vector put(@Cast("size_t") long i, BoolPointer value);
-
-    public native @ByVal Iterator insert(@ByVal Iterator pos, @Cast("std::array<bool,2>*") @ByRef BoolPointer value);
- public native @ByVal Iterator erase(@ByVal Iterator pos);
- public native @ByVal Iterator begin();
- public native @ByVal Iterator end();
- @NoOffset @Name("iterator") public static class Iterator extends Pointer {
- public Iterator(Pointer p) { super(p); }
- public Iterator() { }
-
- public native @Name("operator ++") @ByRef Iterator increment();
- public native @Name("operator ==") boolean equals(@ByRef Iterator it);
- public native @Name("operator *") @Cast("std::array*") @ByRef @Const BoolPointer get();
- }
-
- public BoolPointer[] get() {
- BoolPointer[] array = new BoolPointer[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
- for (int i = 0; i < array.length; i++) {
- array[i] = get(i);
- }
- return array;
- }
- @Override public String toString() {
- return java.util.Arrays.toString(get());
- }
-
- public BoolPointer pop_back() {
- long size = size();
- BoolPointer value = get(size - 1);
- resize(size - 1);
- return value;
- }
- public Bool2Vector push_back(BoolPointer value) {
- long size = size();
- resize(size + 1);
- return put(size, value);
- }
- public Bool2Vector put(BoolPointer value) {
- if (size() != 1) { resize(1); }
- return put(0, value);
- }
- public Bool2Vector put(BoolPointer ... array) {
- if (size() != array.length) { resize(array.length); }
- for (int i = 0; i < array.length; i++) {
- put(i, array[i]);
- }
- return this;
- }
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java
index 10a6ca2a047..2c7414c1709 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java
@@ -103,6 +103,7 @@ public class ByteArrayRef extends Pointer {
/** equals - Check for element-wise equality. */
public native @Cast("const bool") boolean equals(@ByVal ByteArrayRef RHS);
+ public native @Cast("const bool") boolean equals(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte... RHS);
/** slice(n, m) - Take M elements of the array starting at element N */
public native @Const @ByVal ByteArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M);
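The added overload lets callers compare against a plain Java array without building an ArrayRef by hand first. A hedged sketch; the two-argument constructor used here is the usual ArrayRef(data, length) mapping, which this hunk does not show:

    byte[] data = {1, 2, 3};
    ByteArrayRef ref = new ByteArrayRef(new BytePointer(data), data.length);
    boolean same = ref.equals(data);  // element-wise comparison via the new overload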
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
index dd8c370d49d..92d33064b98 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
@@ -33,6 +33,8 @@ public class CPUGeneratorImpl extends GeneratorImpl {
// CPUGeneratorImpl methods
public native @SharedPtr CPUGeneratorImpl clone();
public native void set_current_seed(@Cast("uint64_t") long seed);
+ public native void set_offset(@Cast("uint64_t") long offset);
+ public native @Cast("uint64_t") long get_offset();
public native @Cast("uint64_t") long current_seed();
public native @Cast("uint64_t") long seed();
public native void set_state(@Const @ByRef TensorImpl new_state);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java
index 9156c2370a2..633a735e0cf 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java
@@ -64,7 +64,7 @@ public class CUDAHooksInterface extends Pointer {
public native @ByVal Device getDeviceFromPtr(Pointer arg0);
- public native @Cast("bool") boolean isPinnedPtr(Pointer arg0);
+ public native @Cast("bool") boolean isPinnedPtr(@Const Pointer arg0);
public native @Cast("bool") boolean hasCUDA();
@@ -80,9 +80,9 @@ public class CUDAHooksInterface extends Pointer {
public native @Cast("const at::cuda::NVRTC*") @ByRef Pointer nvrtc();
- public native @Cast("bool") boolean hasPrimaryContext(@Cast("int64_t") long device_index);
+ public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index);
- public native @Cast("int64_t") long current_device();
+ public native @Cast("c10::DeviceIndex") byte current_device();
public native Allocator getPinnedMemoryAllocator();
@@ -106,15 +106,15 @@ public class CUDAHooksInterface extends Pointer {
public native double batchnormMinEpsilonCuDNN();
- public native @Cast("int64_t") long cuFFTGetPlanCacheMaxSize(@Cast("int64_t") long arg0);
+ public native @Cast("int64_t") long cuFFTGetPlanCacheMaxSize(@Cast("c10::DeviceIndex") byte arg0);
- public native void cuFFTSetPlanCacheMaxSize(@Cast("int64_t") long arg0, @Cast("int64_t") long arg1);
+ public native void cuFFTSetPlanCacheMaxSize(@Cast("c10::DeviceIndex") byte arg0, @Cast("int64_t") long arg1);
- public native @Cast("int64_t") long cuFFTGetPlanCacheSize(@Cast("int64_t") long arg0);
+ public native @Cast("int64_t") long cuFFTGetPlanCacheSize(@Cast("c10::DeviceIndex") byte arg0);
- public native void cuFFTClearPlanCache(@Cast("int64_t") long arg0);
+ public native void cuFFTClearPlanCache(@Cast("c10::DeviceIndex") byte arg0);
public native int getNumGPUs();
- public native void deviceSynchronize(@Cast("int64_t") long arg0);
+ public native void deviceSynchronize(@Cast("c10::DeviceIndex") byte arg0);
}
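In practice the narrowing from int64_t to c10::DeviceIndex means device indices now travel as byte on the Java side. A sketch using only the signatures shown above (hooks stands for a CUDAHooksInterface instance obtained elsewhere):

    byte device = hooks.current_device();      // was long in 2.0.x
    if (hooks.hasPrimaryContext(device)) {
        hooks.deviceSynchronize(device);       // same narrowed index type throughout
    }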
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java
index eb2ccc28080..4bf4085b3cd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java
@@ -16,10 +16,8 @@
import static org.bytedeco.openblas.global.openblas.*;
import static org.bytedeco.pytorch.global.torch.*;
- // namespace detail
-/** A dataset that can yield data only in batches. */
-@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java
index 893a57c3d5e..7d0a9cfab3f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java
@@ -17,7 +17,7 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkBatchSharedBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java
new file mode 100644
index 00000000000..74c33b86e3c
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java
@@ -0,0 +1,40 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkBatchSharedTensorBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkBatchSharedTensorBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+ public native @ByVal ChunkMapTensorDataset map(@ByVal TensorExampleStack transform);
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java
index 59ec42fe86b..d1a899046ff 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java
@@ -24,7 +24,7 @@
* A chunk could be an entire file, such as an audio data file or an image,
* or part of a file in the case of a large text-file split based on seek
* positions. */
-@Name("torch::data::datasets::ChunkDataReader,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::ChunkDataReader,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkDataReader extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
@@ -45,7 +45,7 @@ public class ChunkDataReader extends Pointer {
/** Read an entire chunk. */
-  @Virtual(true) public native @ByVal @Cast("torch::data::datasets::ChunkDataReader<torch::data::Example<>,std::vector<torch::data::Example<> > >::ChunkType*") ExampleVector read_chunk(@Cast("size_t") long chunk_index);
+  @Virtual(true) public native @ByVal @Cast("torch::data::datasets::ChunkDataReader<torch::data::Example<torch::Tensor,torch::Tensor>,std::vector<torch::data::Example<torch::Tensor,torch::Tensor> > >::ChunkType*") ExampleVector read_chunk(@Cast("size_t") long chunk_index);
/** Returns the number of chunks available in this reader. */
@Virtual(true) public native @Cast("size_t") long chunk_count();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java
index 8a8ccb0a87a..f6f5d695f4f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java
@@ -26,7 +26,7 @@
 * while the {@code ExampleSampler} determines the order of Examples that are returned
* in each {@code get_batch} call. The hierarchical sampling approach used here is
* inspired by this paper http://martin.zinkevich.org/publications/nips2010.pdf */
-@Name("torch::data::datasets::ChunkDataset") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::ChunkDataset") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkDataset extends ChunkStatefulDataset {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
@@ -45,11 +45,11 @@ public ChunkDataset(
ChunkDatasetOptions options,
Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); }
private native void allocate(
- @ByVal @Cast("JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_00020_0003e_00020_0003e*") ChunkDataReader chunk_reader,
+ @ByVal @Cast("JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_00020_0003e_00020_0003e*") ChunkDataReader chunk_reader,
@ByVal RandomSampler chunk_sampler,
@ByVal RandomSampler example_sampler,
@ByVal ChunkDatasetOptions options,
-      @ByVal(nullValue = "std::function<void(std::vector<torch::data::Example<>>&)>()") @Cast("std::function<void(std::vector<torch::data::Example<>>&)>*") Pointer preprocessing_policy);
+      @ByVal(nullValue = "std::function<void(std::vector<torch::data::Example<torch::Tensor,torch::Tensor>>&)>()") @Cast("std::function<void(std::vector<torch::data::Example<torch::Tensor,torch::Tensor>>&)>*") Pointer preprocessing_policy);
/** Default get_batch method of BatchDataset. This method returns
 * Example batches created from the preloaded chunks. The implementation
@@ -69,7 +69,7 @@ private native void allocate(
// provide a references to chunk sampler. Used mainly in distributed data
// loading to set the epoch number for the sampler.
- public native @Cast("torch::data::datasets::ChunkDataset::ChunkSamplerType*") @ByRef RandomSampler chunk_sampler();
+ public native @Cast("torch::data::datasets::ChunkDataset::ChunkSamplerType*") @ByRef RandomSampler chunk_sampler();
public native void save(@ByRef OutputArchive archive);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java
index a346bc60dd3..69c084905ea 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java
@@ -17,7 +17,7 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkMapBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
@@ -28,6 +28,7 @@ public class ChunkMapBatchDataset extends Pointer {
/** Returns a batch of data given an index. */
public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request);
+  public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef<size_t>", "std::vector<size_t>&"}) @StdVector("size_t") long... request);
/** Returns the size of the dataset, or an empty optional if it is unsized. */
public native @ByVal SizeTOptional size();
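The extra overload means a batch request no longer has to be wrapped in a SizeTArrayRef. A hedged one-liner, with dataset standing for a ChunkMapBatchDataset built elsewhere:

    ExampleVector batch = dataset.get_batch(0, 1, 2);  // indices passed as plain longs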
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java
index 864f0fb09c2..1a721147661 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java
@@ -16,18 +16,16 @@
import static org.bytedeco.openblas.global.openblas.*;
import static org.bytedeco.pytorch.global.torch.*;
- // namespace detail
-/** A {@code MapDataset} is a dataset that applies a transform to a source dataset. */
-@Name("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkMapDataset extends ChunkMapBatchDataset {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ChunkMapDataset(Pointer p) { super(p); }
- public ChunkMapDataset(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform) { super((Pointer)null); allocate(dataset, transform); }
- private native void allocate(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform);
+ public ChunkMapDataset(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform) { super((Pointer)null); allocate(dataset, transform); }
+ private native void allocate(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform);
/** Gets a batch from the source dataset and applies the transform to it,
* returning the result. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java
new file mode 100644
index 00000000000..2fbce701de8
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java
@@ -0,0 +1,40 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkMapTensorBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkMapTensorBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);
+ public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java
new file mode 100644
index 00000000000..16c39244d25
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java
@@ -0,0 +1,49 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkMapTensorDataset extends ChunkMapTensorBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkMapTensorDataset(Pointer p) { super(p); }
+
+
+ public ChunkMapTensorDataset(@ByVal ChunkSharedTensorBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") TensorExampleStack transform) { super((Pointer)null); allocate(dataset, transform); }
+ private native void allocate(@ByVal ChunkSharedTensorBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") TensorExampleStack transform);
+
+ /** Gets a batch from the source dataset and applies the transform to it,
+ * returning the result. */
+ public native @Name("get_batch") @ByVal TensorExampleOptional get_batch_example(@Cast("size_t") long indices);
+
+ /** Returns the size of the source dataset. */
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+ public native @ByVal @NoException(true) SizeTOptional size();
+
+ /** Calls {@code reset()} on the underlying dataset.
+ * NOTE: Stateless datasets do not have a reset() method, so a call to this
+ * method will only compile for stateful datasets (which have a reset()
+ * method). */
+
+
+ /** Returns the underlying dataset. */
+ public native @Const @ByRef @NoException(true) ChunkSharedTensorBatchDataset dataset();
+
+ /** Returns the transform being applied. */
+ public native @Const @ByRef @NoException(true) TensorExampleStack transform();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
index 3ae0a154083..edc6057cfe8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
@@ -30,7 +30,7 @@
*
* A stateful dataloader is created by calling {@code make_data_loader} with a
* stateful dataset. */
-@Name("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkRandomDataLoader extends ChunkRandomDataLoaderBase {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
index d6f1ad4a879..cd5affa75de 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
@@ -17,7 +17,7 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example<>,size_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example,size_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkRandomDataLoaderBase extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
@@ -53,4 +53,5 @@ public class ChunkRandomDataLoaderBase extends Pointer {
public native void join();
/** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
new file mode 100644
index 00000000000..d66bab456e5
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
@@ -0,0 +1,30 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkRandomTensorDataLoader extends ChunkRandomTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkRandomTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatefulDataLoader} from a {@code dataset} and some {@code options}. */
+ public ChunkRandomTensorDataLoader(@ByVal ChunkMapTensorDataset dataset, @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, options); }
+ private native void allocate(@ByVal ChunkMapTensorDataset dataset, @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
new file mode 100644
index 00000000000..fea7e155d57
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example,size_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkRandomTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkRandomTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
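The begin()/end()/join() contract above translates into an explicit loop on the Java side. A minimal usage sketch, assuming a loader built from a ChunkMapTensorDataset and DataLoaderOptions as in the ChunkRandomTensorDataLoader constructor above, and assuming the stacked batch exposes the presets' usual data() accessor on Example:

    // Range-style iteration: advance until the iterator compares equal to the
    // end() sentinel, then join the worker threads from the main thread.
    for (TensorExampleIterator it = loader.begin(); it.notEquals(loader.end()); it.increment()) {
        TensorExample batch = it.multiply(); // current stacked batch (operator*)
        Tensor data = batch.data();          // assumed Example::data accessor
    }
    loader.join();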
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
index a9cef9f3f8b..5c0384b1e62 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
@@ -26,7 +26,7 @@
*
* Use {@code torch::data::datasets::make_shared_dataset()} to create a new
* {@code SharedBatchDataset} like you would a {@code std::shared_ptr}. */
-@Name("torch::data::datasets::SharedBatchDataset >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::SharedBatchDataset >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkSharedBatchDataset extends ChunkBatchSharedBatchDataset {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
@@ -41,7 +41,7 @@ private native void allocate(
@SharedPtr ChunkDataset shared_dataset);
/** Calls {@code get_batch} on the underlying dataset. */
- public native @ByVal @Cast("torch::data::datasets::SharedBatchDataset >::BatchType*") ExampleVectorOptional get_batch(@Cast("torch::data::datasets::SharedBatchDataset >::BatchRequestType") long request);
+ public native @ByVal @Cast("torch::data::datasets::SharedBatchDataset >::BatchType*") ExampleVectorOptional get_batch(@Cast("torch::data::datasets::SharedBatchDataset >::BatchRequestType") long request);
/** Returns the {@code size} from the underlying dataset. */
public native @ByVal SizeTOptional size();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
new file mode 100644
index 00000000000..50c2fd4d368
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
@@ -0,0 +1,52 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::SharedBatchDataset >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkSharedTensorBatchDataset extends ChunkBatchSharedTensorBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkSharedTensorBatchDataset(Pointer p) { super(p); }
+
+
+ /** Constructs a new {@code SharedBatchDataset} from a {@code shared_ptr} to the
+ * {@code UnderlyingDataset}. */
+ /* implicit */ public ChunkSharedTensorBatchDataset(
+ @SharedPtr ChunkTensorDataset shared_dataset) { super((Pointer)null); allocate(shared_dataset); }
+private native void allocate(
+ @SharedPtr ChunkTensorDataset shared_dataset);
+
+ /** Calls {@code get_batch} on the underlying dataset. */
+ public native @ByVal @Cast("torch::data::datasets::SharedBatchDataset >::BatchType*") TensorExampleVectorOptional get_batch(@Cast("torch::data::datasets::SharedBatchDataset >::BatchRequestType") long request);
+
+ /** Returns the {@code size} from the underlying dataset. */
+ public native @ByVal SizeTOptional size();
+
+ /** Accesses the underlying dataset. */
+ public native @ByRef @Name("operator *") ChunkTensorDataset multiply();
+
+ /** Accesses the underlying dataset. */
+
+ /** Accesses the underlying dataset. */
+ public native @Name("operator ->") ChunkTensorDataset access();
+
+ /** Accesses the underlying dataset. */
+
+ /** Calls {@code reset()} on the underlying dataset. */
+ public native void reset();
+}
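A short sketch of the sharing semantics described above, mirroring make_shared_dataset(); chunkDataset is assumed to be a ChunkTensorDataset built as shown under ChunkTensorDataset.java further below:

    // Copies of `shared` all point at one underlying chunk dataset.
    ChunkSharedTensorBatchDataset shared =
            new ChunkSharedTensorBatchDataset(chunkDataset); // wraps the shared_ptr
    shared.reset();                                          // forwarded to the dataset
    ChunkTensorDataset underlying = shared.access();         // operator-> on the shared_ptr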
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
index 145c1c03674..df8566ad0e6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
@@ -31,7 +31,7 @@
* {@code optional} (i.e. the type specified in the {@code StatefulDataset}
* specialization is automatically boxed into an {@code optional} for the dataset's
* {@code BatchType}). */
-@Name("torch::data::datasets::StatefulDataset,JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_00020_0003e_00020_0003e::BatchType,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::StatefulDataset,JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_00020_0003e_00020_0003e::BatchType,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkStatefulDataset extends ChunkBatchDataset {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
new file mode 100644
index 00000000000..a744347c27f
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
@@ -0,0 +1,34 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::StatefulDataset,JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_00020_0003e_00020_0003e::BatchType,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkStatefulTensorDataset extends ChunkTensorBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkStatefulTensorDataset(Pointer p) { super(p); }
+
+ /** Resets internal state of the dataset. */
+ public native void reset();
+
+ /** Saves the stateful dataset's state to the {@code archive}. */
+ public native void save(@ByRef OutputArchive archive);
+
+ /** Deserializes the stateful dataset's state from the {@code archive}. */
+ public native void load(@ByRef InputArchive archive);
+}
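A hedged checkpointing sketch for the save/load pair above; OutputArchive.save_to and InputArchive.load_from mirror the torch::serialize API and are assumed to be mapped the same way here, and the file name is invented:

    OutputArchive out = new OutputArchive();
    dataset.save(out);               // serializes the chunk/example sampler state
    out.save_to("chunk_state.pt");   // invented path

    InputArchive in = new InputArchive();
    in.load_from("chunk_state.pt");
    dataset.load(in);                // resumes from the saved mid-epoch state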
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
new file mode 100644
index 00000000000..69da762261e
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
@@ -0,0 +1,39 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkTensorBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkTensorBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
new file mode 100644
index 00000000000..2598d7c7a6e
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
@@ -0,0 +1,48 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::ChunkDataReader,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkTensorDataReader extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public ChunkTensorDataReader() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public ChunkTensorDataReader(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkTensorDataReader(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public ChunkTensorDataReader position(long position) {
+ return (ChunkTensorDataReader)super.position(position);
+ }
+ @Override public ChunkTensorDataReader getPointer(long i) {
+ return new ChunkTensorDataReader((Pointer)this).offsetAddress(i);
+ }
+
+
+
+ /** Read an entire chunk. */
+ @Virtual(true) public native @ByVal @Cast("torch::data::datasets::ChunkDataReader,std::vector > >::ChunkType*") TensorExampleVector read_chunk(@Cast("size_t") long chunk_index);
+
+ /** Returns the number of chunks available in this reader. */
+ @Virtual(true) public native @Cast("size_t") long chunk_count();
+
+ /** This will clear any internal state associated with this reader. */
+ @Virtual(true) public native void reset();
+}
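Because all three methods are @Virtual(true), a Java subclass can supply the implementation and the native data pipeline will call back into it. A minimal sketch with an invented in-memory reader; it assumes TensorExampleVector exposes the usual no-argument constructor of the presets' vector adapters:

    class RandomChunkReader extends ChunkTensorDataReader {
        @Override public TensorExampleVector read_chunk(long chunk_index) {
            // Invented payload: return whatever examples make up this chunk.
            return new TensorExampleVector();
        }
        @Override public long chunk_count() { return 8; }
        @Override public void reset() { /* stateless sketch */ }
    }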
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
new file mode 100644
index 00000000000..5d4b34ee0d6
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
@@ -0,0 +1,68 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::ChunkDataset") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkTensorDataset extends ChunkStatefulTensorDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ChunkTensorDataset(Pointer p) { super(p); }
+
+
+ public ChunkTensorDataset(
+ ChunkTensorDataReader chunk_reader,
+ RandomSampler chunk_sampler,
+ RandomSampler example_sampler,
+ ChunkDatasetOptions options) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, null); }
+ public ChunkTensorDataset(
+ ChunkTensorDataReader chunk_reader,
+ RandomSampler chunk_sampler,
+ RandomSampler example_sampler,
+ ChunkDatasetOptions options,
+ Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_00020_0003e_00020_0003e*") ChunkTensorDataReader chunk_reader,
+ @ByVal RandomSampler chunk_sampler,
+ @ByVal RandomSampler example_sampler,
+ @ByVal ChunkDatasetOptions options,
+ @ByVal(nullValue = "std::function>&)>()") @Cast("std::function>&)>*") Pointer preprocessing_policy);
+
+ /** Default get_batch method of BatchDataset. This method returns
+  * Example batches created from the preloaded chunks. The implementation
+  * is dataset-agnostic and does not need overriding in different chunk
+  * datasets. */
+ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long batch_size);
+
+ /** Helper method around {@code get_batch} since {@code batch_size} is not strictly necessary. */
+ public native @ByVal TensorExampleVectorOptional get_batch();
+
+ /** This will clear any internal state and starts the internal prefetching
+ * mechanism for the chunk dataset. */
+ public native void reset();
+
+ /** size is not used for chunk dataset. */
+ public native @ByVal SizeTOptional size();
+
+ // Provides a reference to the chunk sampler, used mainly in distributed
+ // data loading to set the epoch number for the sampler.
+ public native @Cast("torch::data::datasets::ChunkDataset<JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_00020_0003e_00020_0003e,torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::ChunkSamplerType*") @ByRef RandomSampler chunk_sampler();
+
+ public native void save(@ByRef OutputArchive archive);
+
+ public native void load(@ByRef InputArchive archive);
+}
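Wiring it together: a hedged construction sketch for the four-argument constructor above. RandomSampler(0) and ChunkDatasetOptions(preloader_count, batch_size) follow the C++ signatures and are assumed to be mapped unchanged by the presets:

    ChunkTensorDataset dataset = new ChunkTensorDataset(
            new RandomChunkReader(),         // reader sketched under ChunkTensorDataReader.java
            new RandomSampler(0),            // chunk sampler; its size is reset internally
            new RandomSampler(0),            // example sampler
            new ChunkDatasetOptions(2, 32)); // 2 preloader threads, batch size 32
    dataset.reset();                         // starts the internal prefetching threads
    TensorExampleVectorOptional batch = dataset.get_batch();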
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java
index 1350f11bacf..e0fc05b2248 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java
@@ -78,7 +78,7 @@ private native void allocate(
public native @Cast("size_t") long num_bailouts();
public native @Const @ByRef IValueVector constant_table();
public native @Const @ByRef TypeVector type_table();
- public native @Const @ByRef InstructionVector instructions();
+ public native @StdVector Instruction instructions();
public native @Const @ByRef StringSizeTMap op_to_num_specified_args();
public native @Cast("torch::jit::Node**") @StdVector PointerPointer instructions_source();
public native void request_bailout(@Cast("size_t") long index);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java
deleted file mode 100644
index ff4b8bb1df9..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class CompilationUnitVector extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public CompilationUnitVector(Pointer p) { super(p); }
- public CompilationUnitVector() { allocate(); }
- private native void allocate();
-
-
- public boolean empty() { return size() == 0; }
- public native long size();
-
- public CompilationUnit front() { return get(0); }
- public CompilationUnit back() { return get(size() - 1); }
- @Index(function = "at") public native @ByRef CompilationUnit get(@Cast("size_t") long i);
-
- public native @ByVal Iterator begin();
- public native @ByVal Iterator end();
- @NoOffset @Name("iterator") public static class Iterator extends Pointer {
- public Iterator(Pointer p) { super(p); }
- public Iterator() { }
-
- public native @Name("operator ++") @ByRef Iterator increment();
- public native @Name("operator ==") boolean equals(@ByRef Iterator it);
- public native @Name("operator *") @ByRef @Const CompilationUnit get();
- }
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java
similarity index 74%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java
index 0307371678f..780ba34f781 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java
@@ -17,10 +17,10 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class InterpreterStateImpl extends Pointer {
+@Namespace("torch::dynamo::autograd") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class CompiledNodeArgs extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
- public InterpreterStateImpl() { super((Pointer)null); }
+ public CompiledNodeArgs() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public InterpreterStateImpl(Pointer p) { super(p); }
+ public CompiledNodeArgs(Pointer p) { super(p); }
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
index 62169c23401..dfcd3ecb333 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
@@ -39,13 +39,14 @@ public class Context extends Pointer {
public native @Const @ByRef Generator defaultGenerator(@ByVal Device device);
public native @ByVal Device getDeviceFromPtr(Pointer data, DeviceType device_type);
public native @ByVal Device getDeviceFromPtr(Pointer data, @Cast("c10::DeviceType") byte device_type);
- public static native @Cast("bool") boolean isPinnedPtr(Pointer data);
+ public static native @Cast("bool") boolean isPinnedPtr(@Const Pointer data);
public static native @Cast("bool") boolean hasOpenMP();
public static native @Cast("bool") boolean hasMKL();
public static native @Cast("bool") boolean hasLAPACK();
public static native @Cast("bool") boolean hasMKLDNN();
public static native @Cast("bool") boolean hasMAGMA();
public static native @Cast("bool") boolean hasCUDA();
+ public static native @Cast("bool") boolean hasMTIA();
public static native @Cast("bool") boolean hasCUDART();
public static native long versionCUDART();
public static native @Cast("bool") boolean hasCuDNN();
@@ -55,6 +56,7 @@ public class Context extends Pointer {
public static native @Cast("bool") boolean hasMPS();
public static native @Cast("bool") boolean hasIPU();
public static native @Cast("bool") boolean hasXLA();
+ public static native @Cast("bool") boolean hasXPU();
public static native @Cast("bool") boolean hasLazy();
public static native @Cast("bool") boolean hasORT();
// defined in header so that getNonVariableType has ability to inline
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java
similarity index 61%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java
index fa207e4a8f7..ae08e542c96 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java
@@ -17,10 +17,14 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Namespace("c10") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class SchemaRegistrationHandleRAII extends Pointer {
+
+// We use a forward declaration here instead of #include to avoid
+// leaking DLPack implementation details into every project that includes `ATen/Context.h`,
+// which in turn would lead to a conflict when linked with another project using DLPack (for example TVM)
+@Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DLDevice_ extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
- public SchemaRegistrationHandleRAII() { super((Pointer)null); }
+ public DLDevice_() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public SchemaRegistrationHandleRAII(Pointer p) { super(p); }
+ public DLDevice_(Pointer p) { super(p); }
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java
index 88ca2be01a9..4149f900580 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java
@@ -46,20 +46,17 @@ public class DataPtr extends Pointer {
private native void allocate();
public DataPtr(Pointer data, @ByVal Device device) { super((Pointer)null); allocate(data, device); }
private native void allocate(Pointer data, @ByVal Device device);
- public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); }
- private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter, @ByVal Device device);
- public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); }
- private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter, @ByVal Device device);
- public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); }
- private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter, @ByVal Device device);
+ public DataPtr(Pointer data, Pointer ctx, PointerConsumer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); }
+ private native void allocate(Pointer data, Pointer ctx, PointerConsumer ctx_deleter, @ByVal Device device);
public native @Name("operator ->") Pointer access();
public native void clear();
public native Pointer get();
+ public native Pointer mutable_get();
public native Pointer get_context();
public native Pointer release_context();
public native @Cast("bool") @Name("operator bool") boolean asBoolean();
- public native @Cast("c10::DeleterFnPtr") PointerConsumer get_deleter();
+ public native PointerConsumer get_deleter();
/**
* Compare the deleter in a DataPtr to expected_deleter.
* If it matches, replace the deleter with new_deleter
@@ -98,14 +95,8 @@ public class DataPtr extends Pointer {
* in question to confirm this.
*/
public native @Cast("bool") boolean compare_exchange_deleter(
- @Cast("c10::DeleterFnPtr") PointerConsumer expected_deleter,
- @Cast("c10::DeleterFnPtr") PointerConsumer new_deleter);
- public native @Cast("bool") boolean compare_exchange_deleter(
- @Cast("c10::DeleterFnPtr") Pointer expected_deleter,
- @Cast("c10::DeleterFnPtr") Pointer new_deleter);
- public native @Cast("bool") boolean compare_exchange_deleter(
- @Cast("c10::DeleterFnPtr") long expected_deleter,
- @Cast("c10::DeleterFnPtr") long new_deleter);
+ PointerConsumer expected_deleter,
+ PointerConsumer new_deleter);
public native @ByVal Device device();
// Unsafely mutates the device on a DataPtr. Under normal use,
// you should never actually need to call this function.
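With the raw @Cast overloads removed above, deleters are now passed only as PointerConsumer. A hedged construction sketch; the call method name follows JavaCPP's FunctionPointer convention and is an assumption here, as is the Device(String) constructor:

    // Hypothetical deleter for the allocation context; keep a strong reference
    // to it for as long as the DataPtr is alive so it is not garbage-collected.
    class ContextDeleter extends PointerConsumer {
        @Override public void call(Pointer ctx) {
            // hypothetical cleanup of the allocation context
        }
    }
    ContextDeleter deleter = new ContextDeleter();
    BytePointer data = new BytePointer(1024);  // toy allocation
    DataPtr ptr = new DataPtr(data, /*ctx=*/data, deleter, new Device("cpu"));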
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java
similarity index 69%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java
index 5df2a22e996..30279d21e40 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java
@@ -17,21 +17,21 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class InstructionVector extends Pointer {
+@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DataPtrVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public InstructionVector(Pointer p) { super(p); }
- public InstructionVector() { allocate(); }
+ public DataPtrVector(Pointer p) { super(p); }
+ public DataPtrVector() { allocate(); }
private native void allocate();
public boolean empty() { return size() == 0; }
public native long size();
- public Instruction front() { return get(0); }
- public Instruction back() { return get(size() - 1); }
- @Index(function = "at") public native @ByRef Instruction get(@Cast("size_t") long i);
+ public DataPtr front() { return get(0); }
+ public DataPtr back() { return get(size() - 1); }
+ @Index(function = "at") public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr get(@Cast("size_t") long i);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@@ -41,7 +41,7 @@ public Iterator() { }
public native @Name("operator ++") @ByRef Iterator increment();
public native @Name("operator ==") boolean equals(@ByRef Iterator it);
- public native @Name("operator *") @ByRef @Const Instruction get();
+ public native @Name("operator *") @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr get();
}
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java
index 67d3a7dc2a0..622568cefd4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java
@@ -18,34 +18,10 @@
import static org.bytedeco.pytorch.global.torch.*;
-// Used in torch.package and TorchScript deserialization to coordinate
-// sharing of storages between models.
-@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class DeserializationStorageContext extends Pointer {
- static { Loader.load(); }
+ /** Empty constructor. Calls {@code super((Pointer)null)}. */
+ public DeserializationStorageContext() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public DeserializationStorageContext(Pointer p) { super(p); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public DeserializationStorageContext(long size) { super((Pointer)null); allocateArray(size); }
- private native void allocateArray(long size);
- @Override public DeserializationStorageContext position(long position) {
- return (DeserializationStorageContext)super.position(position);
- }
- @Override public DeserializationStorageContext getPointer(long i) {
- return new DeserializationStorageContext((Pointer)this).offsetAddress(i);
- }
-
- public DeserializationStorageContext() { super((Pointer)null); allocate(); }
- private native void allocate();
-
-
-
- public native void addStorage(@StdString BytePointer name, @Cast({"", "c10::Storage&&"}) @StdMove Storage storage);
- public native void addStorage(@StdString String name, @Cast({"", "c10::Storage&&"}) @StdMove Storage storage);
-
- public native @Cast("bool") boolean hasStorage(@StdString BytePointer name);
- public native @Cast("bool") boolean hasStorage(@StdString String name);
-
- public native @Cast({"", "c10::Storage&&"}) @StdMove Storage getStorage(@StdString BytePointer name);
- public native @Cast({"", "c10::Storage&&"}) @StdMove Storage getStorage(@StdString String name);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java
index 0a895e88832..822d4df91e6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java
@@ -18,7 +18,7 @@
import static org.bytedeco.pytorch.global.torch.*;
-/** Represents a a compute device on which a tensor is located. A device is
+/** Represents a compute device on which a tensor is located. A device is
* uniquely identified by a type, which specifies the type of machine it is
* (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
* specific compute device when there is more than one of a certain type. The
@@ -79,6 +79,9 @@ public class Device extends Pointer {
/** Return true if the device is of CUDA type. */
public native @Cast("bool") @NoException(true) boolean is_cuda();
+ /** Return true if the device is of PrivateUse1 type. */
+ public native @Cast("bool") @NoException(true) boolean is_privateuseone();
+
/** Return true if the device is of MPS type. */
public native @Cast("bool") @NoException(true) boolean is_mps();
@@ -97,6 +100,9 @@ public class Device extends Pointer {
/** Return true if the device is of XLA type. */
public native @Cast("bool") @NoException(true) boolean is_xla();
+ /** Return true if the device is of MTIA type. */
+ public native @Cast("bool") @NoException(true) boolean is_mtia();
+
/** Return true if the device is of HPU type. */
public native @Cast("bool") @NoException(true) boolean is_hpu();
@@ -118,7 +124,7 @@ public class Device extends Pointer {
/** Return true if the device is of CPU type. */
public native @Cast("bool") @NoException(true) boolean is_cpu();
- /** Return true if the device supports arbirtary strides. */
+ /** Return true if the device supports arbitrary strides. */
public native @Cast("bool") @NoException(true) boolean supports_as_strided();
/** Same string as returned from operator<<. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java
similarity index 69%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java
index f1f13f8161a..3373402df36 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java
@@ -17,21 +17,21 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class RecordScopeSet extends Pointer {
+@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DeviceTypeSet extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public RecordScopeSet(Pointer p) { super(p); }
- public RecordScopeSet() { allocate(); }
+ public DeviceTypeSet(Pointer p) { super(p); }
+ public DeviceTypeSet() { allocate(); }
private native void allocate();
- public native @Name("operator =") @ByRef RecordScopeSet put(@ByRef RecordScopeSet x);
+ public native @Name("operator =") @ByRef DeviceTypeSet put(@ByRef DeviceTypeSet x);
public boolean empty() { return size() == 0; }
public native long size();
- public RecordScope front() { try (Iterator it = begin()) { return it.get(); } }
- public native void insert(@ByRef RecordScope value);
- public native void erase(@ByRef RecordScope value);
+ public DeviceType front() { try (Iterator it = begin()) { return it.get(); } }
+ public native void insert(@ByRef DeviceType value);
+ public native void erase(@ByRef DeviceType value);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@NoOffset @Name("iterator") public static class Iterator extends Pointer {
@@ -40,7 +40,7 @@ public Iterator() { }
public native @Name("operator ++") @ByRef Iterator increment();
public native @Name("operator ==") boolean equals(@ByRef Iterator it);
- public native @Name("operator *") @ByRef @Const RecordScope get();
+ public native @Name("operator *") @ByRef @Const DeviceType get();
}
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java
index 1fee80e6d40..46f7032ccb6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java
@@ -32,6 +32,6 @@ public class DimVectorInferExpandGeometryResult extends Pointer {
private native void allocate(@Cast("size_t") long ndim);
public DimVectorInferExpandGeometryResult(@ByVal LongArrayRef sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); }
private native void allocate(@ByVal LongArrayRef sizes_, @Cast("size_t") long ndim);
- public DimVectorInferExpandGeometryResult(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); }
- private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes_, @Cast("size_t") long ndim);
+ public DimVectorInferExpandGeometryResult(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); }
+ private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] sizes_, @Cast("size_t") long ndim);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java
index fc3d63ab8b1..f26ff6ea85e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java
@@ -19,7 +19,7 @@
/** A transformation of a batch to a new batch. */
-@Name("torch::data::transforms::BatchTransform >,torch::data::Example<> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::transforms::BatchTransform >,torch::data::Example >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ExampleCollation extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java
index 27e10847c52..929bd97678a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java
@@ -18,7 +18,7 @@
import static org.bytedeco.pytorch.global.torch.*;
// namespace detail
-@Name("torch::data::Iterator >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::Iterator >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ExampleIterator extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java
index f67ef9a2cd1..76b3468f69a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java
@@ -38,5 +38,5 @@ public class ExampleStack extends ExampleCollation {
return new ExampleStack((Pointer)this).offsetAddress(i);
}
- public native @ByVal Example apply_batch(@ByVal ExampleVector examples);
+ public native @ByVal Example apply_batch(@StdVector Example examples);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java
index 454209e92d4..2a7eda78f74 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java
@@ -17,7 +17,7 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ExampleVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java
similarity index 67%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java
index 9d7253dfb60..470c9b179bc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java
@@ -18,29 +18,29 @@
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::Iterator > > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ExampleVectorOptionalIterator extends Pointer {
+@Name("torch::data::Iterator > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ExampleVectorIterator extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public ExampleVectorOptionalIterator(Pointer p) { super(p); }
+ public ExampleVectorIterator(Pointer p) { super(p); }
// Type aliases to make the class recognized as a proper iterator.
/** Increments the iterator.
* Only permitted for valid iterators (not past the end). */
- public native @ByRef @Name("operator ++") ExampleVectorOptionalIterator increment();
+ public native @ByRef @Name("operator ++") ExampleVectorIterator increment();
/** Returns the current batch.
* Only permitted for valid iterators (not past the end). */
- public native @ByRef @Name("operator *") ExampleVectorOptional multiply();
+ public native @ByRef @Name("operator *") ExampleVector multiply();
/** Returns a pointer to the current batch.
* Only permitted for valid iterators (not past the end). */
- public native @Name("operator ->") ExampleVectorOptional access();
+ public native @Name("operator ->") ExampleVector access();
/** Compares two iterators for equality. */
- public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ExampleVectorOptionalIterator other);
+ public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ExampleVectorIterator other);
/** Compares two iterators for inequality. */
- public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ExampleVectorOptionalIterator other);
+ public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ExampleVectorIterator other);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java
index 44ce876c817..f1c3f540474 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java
@@ -17,7 +17,7 @@
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("c10::optional > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ExampleVectorOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java
index 7661ce7bd4e..279ff234f29 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java
@@ -38,12 +38,14 @@ public ExperimentalConfig(
@Cast("bool") boolean profiler_measure_per_kernel/*=false*/,
@Cast("bool") boolean verbose/*=false*/,
@ByVal(nullValue = "std::vector{}") StringVector performance_events,
- @Cast("bool") boolean adjust_timestamps/*=false*/) { super((Pointer)null); allocate(profiler_metrics, profiler_measure_per_kernel, verbose, performance_events, adjust_timestamps); }
+ @Cast("bool") boolean enable_cuda_sync_events/*=false*/,
+ @Cast("bool") boolean adjust_timestamps/*=false*/) { super((Pointer)null); allocate(profiler_metrics, profiler_measure_per_kernel, verbose, performance_events, enable_cuda_sync_events, adjust_timestamps); }
private native void allocate(
@ByVal(nullValue = "std::vector{}") StringVector profiler_metrics,
@Cast("bool") boolean profiler_measure_per_kernel/*=false*/,
@Cast("bool") boolean verbose/*=false*/,
@ByVal(nullValue = "std::vector{}") StringVector performance_events,
+ @Cast("bool") boolean enable_cuda_sync_events/*=false*/,
@Cast("bool") boolean adjust_timestamps/*=false*/);
public ExperimentalConfig() { super((Pointer)null); allocate(); }
private native void allocate();
@@ -57,6 +59,12 @@ private native void allocate(
* An empty list will disable performance event based profiling altogether.
*/
public native @ByRef StringVector performance_events(); public native ExperimentalConfig performance_events(StringVector setter);
+ /*
+ * For CUDA profiling mode, enable adding CUDA synchronization events
+ * that expose CUDA device, stream and event synchronization activities.
+ * This feature is new and currently disabled by default.
+ */
+ public native @Cast("bool") boolean enable_cuda_sync_events(); public native ExperimentalConfig enable_cuda_sync_events(boolean setter);
/*
* Controls whether or not timestamp adjustment occurs after profiling.
* The purpose of this is to adjust Vulkan event timelines to align with those
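The new flag defaults to false, so existing profiler configurations keep their behavior; enabling it is a one-line change through the generated setter. A small sketch using the no-argument constructor and accessors added above:

    // Request CUDA device/stream/event synchronization activities.
    ExperimentalConfig cfg = new ExperimentalConfig();
    cfg.enable_cuda_sync_events(true);
    boolean enabled = cfg.enable_cuda_sync_events(); // reads back true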
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java
new file mode 100644
index 00000000000..876b2466492
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java
@@ -0,0 +1,46 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+ // namespace detail
+
+@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class Float8_e4m3fn extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public Float8_e4m3fn(Pointer p) { super(p); }
+
+ public native @Cast("uint8_t") byte x(); public native Float8_e4m3fn x(byte setter);
+
+ @Opaque public static class from_bits_t extends Pointer {
+ /** Empty constructor. Calls {@code super((Pointer)null)}. */
+ public from_bits_t() { super((Pointer)null); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public from_bits_t(Pointer p) { super(p); }
+ }
+ public static native @Const @ByVal from_bits_t from_bits();
+
+ public Float8_e4m3fn() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+ public Float8_e4m3fn(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); }
+ private native void allocate(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1);
+ public Float8_e4m3fn(float value) { super((Pointer)null); allocate(value); }
+ private native void allocate(float value);
+ public native @Name("operator float") float asFloat();
+ public native @Cast("bool") boolean isnan();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java
new file mode 100644
index 00000000000..02104b9c12d
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java
@@ -0,0 +1,47 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+ // namespace detail
+
+@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class Float8_e5m2 extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public Float8_e5m2(Pointer p) { super(p); }
+
+ public native @Cast("uint8_t") byte x(); public native Float8_e5m2 x(byte setter);
+
+ @Opaque public static class from_bits_t extends Pointer {
+ /** Empty constructor. Calls {@code super((Pointer)null)}. */
+ public from_bits_t() { super((Pointer)null); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public from_bits_t(Pointer p) { super(p); }
+ }
+ public static native @Const @ByVal from_bits_t from_bits();
+
+ public Float8_e5m2() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+ public Float8_e5m2(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); }
+ private native void allocate(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1);
+ public Float8_e5m2(float value) { super((Pointer)null); allocate(value); }
+ private native void allocate(float value);
+ public native @Name("operator float") float asFloat();
+ public native @Cast("bool") boolean isnan();
+ public native @Cast("bool") boolean isinf();
+}
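Both 8-bit formats round-trip through float at reduced precision; e4m3fn ("fn" for finite) gives up infinities for an extra mantissa bit, which is why only Float8_e5m2 exposes isinf(). A small sketch using the constructors and accessors above:

    Float8_e4m3fn a = new Float8_e4m3fn(0.3f);
    float back = a.asFloat();            // nearest representable e4m3 value, not exactly 0.3
    Float8_e5m2 b = new Float8_e5m2(Float.POSITIVE_INFINITY);
    boolean inf = b.isinf();             // true: e5m2 can represent infinity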
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java
index ae0f3aa9ea8..093a011160e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java
@@ -103,6 +103,7 @@ public class FloatArrayRef extends Pointer {
/** equals - Check for element-wise equality. */
public native @Cast("const bool") boolean equals(@ByVal FloatArrayRef RHS);
+ public native @Cast("const bool") boolean equals(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float... RHS);
/** slice(n, m) - Take M elements of the array starting at element N */
public native @Const @ByVal FloatArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java
new file mode 100644
index 00000000000..ad9a1e1863e
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java
@@ -0,0 +1,42 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+/** Like {@code DataLoaderOptions}, but without any unconfigured state.
+ * {@code DataLoaderOptions} has some options that depend on other options
+ * ({@code max_jobs} => {@code 2 * workers}). In the spirit of properly using the C++ type
+ * system, {@code DataLoaderOptions} allows only setting values. To access values,
+ * you must create a {@code FullDataLoaderOptions} from a {@code DataLoaderOptions}
+ * instance, which will do any necessary coalescing. */
+@Namespace("torch::data") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class FullDataLoaderOptions extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public FullDataLoaderOptions(Pointer p) { super(p); }
+
+ public FullDataLoaderOptions(@ByVal DataLoaderOptions options) { super((Pointer)null); allocate(options); }
+ private native void allocate(@ByVal DataLoaderOptions options);
+
+ public native @Cast("size_t") long batch_size(); public native FullDataLoaderOptions batch_size(long setter);
+ public native @Cast("size_t") long workers(); public native FullDataLoaderOptions workers(long setter);
+ public native @Cast("size_t") long max_jobs(); public native FullDataLoaderOptions max_jobs(long setter);
+ public native @ByRef @Cast("c10::optional*") Pointer timeout(); public native FullDataLoaderOptions timeout(Pointer setter);
+ public native @Cast("bool") boolean enforce_ordering(); public native FullDataLoaderOptions enforce_ordering(boolean setter);
+ public native @Cast("bool") boolean drop_last(); public native FullDataLoaderOptions drop_last(boolean setter);
+}
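A sketch of the coalescing described in the class comment; the fluent workers(long) setter mirrors the C++ TORCH_ARG accessor and is assumed to carry over to the generated DataLoaderOptions:

    DataLoaderOptions opts = new DataLoaderOptions(/*batch_size=*/64);
    opts.workers(4);                              // max_jobs left unconfigured
    FullDataLoaderOptions full = new FullDataLoaderOptions(opts);
    long jobs = full.max_jobs();                  // coalesced to 2 * workers == 8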
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java
index c826d754b4c..522c9d2423a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java
@@ -45,6 +45,7 @@ public class FuncTorchTLSBase extends Pointer {
public native @UniquePtr FuncTorchTLSBase deepcopy();
public native @Cast("int64_t") long checkSupportsSingleLevelAutogradFunction();
+ public native void checkSupportsCppAutogradFunction();
public native void checkSupportsInplaceRequiresGrad();
public native void checkSupportsRetainGrad();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java
index b8499221f1b..171b416baf1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java
@@ -27,4 +27,6 @@ public class FunctionPostHook extends Pointer {
public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(
@Cast({"", "std::vector"}) @StdMove TensorVector outputs,
@Cast({"", "std::vector"}) @StdMove TensorVector inputs);
+ // Only implemented for Python hooks; registers the hook with compiled autograd.
+ public native void compiled_args(@ByRef CompiledNodeArgs args);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java
index 1edde112cfc..749a9b3c347 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java
@@ -25,4 +25,6 @@ public class FunctionPreHook extends Pointer {
public FunctionPreHook(Pointer p) { super(p); }
public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector grads);
+ // Only implemented for Python hooks; registers the hook with compiled autograd.
+ public native void compiled_args(@ByRef CompiledNodeArgs args);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java
index 365c78f9e26..9f51fb8de3e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java
@@ -98,7 +98,7 @@ public native void markCompleted(
// This accessor should only be used if we know that the future is
// completed() with no error.
- public native @Const @ByRef WeakStorageVector storages();
+ public native @StdVector WeakStorage storages();
/**
* Add a callback to the future.
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java
new file mode 100644
index 00000000000..4795c37a2f2
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java
@@ -0,0 +1,40 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// used to hold traceback information in allocators
+@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class GatheredContext extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public GatheredContext() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public GatheredContext(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public GatheredContext(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public GatheredContext position(long position) {
+ return (GatheredContext)super.position(position);
+ }
+ @Override public GatheredContext getPointer(long i) {
+ return new GatheredContext((Pointer)this).offsetAddress(i);
+ }
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java
index 8dd29e67335..bd1dee91cc3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java
@@ -85,6 +85,13 @@ public class Generator extends Pointer {
public native @Const @ByRef GeneratorImplPtr getIntrusivePtr();
public native void set_current_seed(@Cast("uint64_t") long seed);
+  // Sets the offset of the Generator state to the desired offset. This is
+  // currently only supported for Philox-based Generators, i.e., CUDA and MPS.
+ public native void set_offset(@Cast("uint64_t") long offset);
+
+  // Returns the offset of the Generator state. This is currently only
+  // supported for Philox-based Generators, i.e., CUDA and MPS.
+ public native @Cast("uint64_t") long get_offset();
public native @Cast("uint64_t") long current_seed();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java
index ad4d1087ba7..147677fde43 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java
@@ -35,6 +35,8 @@ public class GeneratorImpl extends Pointer {
// Common methods for all generators
public native void set_current_seed(@Cast("uint64_t") long seed);
+ public native void set_offset(@Cast("uint64_t") long offset);
+ public native @Cast("uint64_t") long get_offset();
public native @Cast("uint64_t") long current_seed();
public native @Cast("uint64_t") long seed();
public native void set_state(@Const @ByRef TensorImpl new_state);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java
index fef627f9ae0..4388e2cec88 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java
@@ -252,6 +252,14 @@ public class IValue extends Pointer {
public native @ByVal SymFloat toSymFloat();
+ public IValue(@ByVal SymBool i) { super((Pointer)null); allocate(i); }
+ private native void allocate(@ByVal SymBool i);
+
+ public native @Cast("bool") boolean isSymBool();
+
+
+ public native @ByVal SymBool toSymBool();
+
// allow you to pass literals (3, 4) without ambiguity
public IValue(int i) { super((Pointer)null); allocate(i); }
private native void allocate(int i);
@@ -268,9 +276,11 @@ public class IValue extends Pointer {
// IntList
public native @Cast("bool") boolean isIntList();
+ public native @Cast("bool") boolean isSymIntList();
public native @ByVal LongList toIntList();
public native @ByVal @Cast("std::vector*") LongVector toIntVector();
+ public native @ByVal SymIntVector toSymIntVector();
public native @ByVal DimVector toDimVector();
// ConstantString
@@ -497,6 +507,11 @@ public class IValue extends Pointer {
// TODO: There are several places that recurse over IValue. This is fragile.
// This visitor should be used to recurse over ivalues.
+  public native @ByVal IValue deepcopy(@ByVal(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @ByVal IValue deepcopy();
- public native @ByVal IValue deepcopy(@ByRef HashAliasedIValueMap memo);
+ public native @ByVal IValue deepcopy(
+ @ByRef HashAliasedIValueMap memo,
+        @ByVal(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ public native @ByVal IValue deepcopy(
+ @ByRef HashAliasedIValueMap memo);
}
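
A short sketch of the IValue additions in this hunk: wrapping a `SymBool` and the new device-aware `deepcopy` overload. The `SymBool(boolean)`, `Device(String)`, and `DeviceOptional(Device)` constructors are assumed from the presets' usual value-constructor mappings; treat this as illustrative, not canonical.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class IValueAdditions {
    public static void main(String[] args) {
        // Wrap a SymBool and query it back through the new accessors.
        IValue b = new IValue(new SymBool(true));
        System.out.println(b.isSymBool());   // true

        // Deep-copy a tensor IValue onto an explicit device via the new
        // DeviceOptional parameter (assumed constructors, see lead-in).
        IValue t = new IValue(ones(2, 2));
        IValue copy = t.deepcopy(new DeviceOptional(new Device("cpu")));
        System.out.println(copy.isTensor()); // true
    }
}
```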
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java
index 7b01b98d7a2..1b5381c0d3c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java
@@ -71,7 +71,7 @@ public class InferenceMode extends Pointer {
//
// 3. Why does setting InferenceMode also set GradMode?
//
- // This is required since InferenceMode is a faster and more restricive
+ // This is required since InferenceMode is a faster and more restrictive
// version of NoGradGuard. All runtime checks using GradMode::is_enabled()
// are applicable to InferenceMode as well, e.g.
// `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java
index 9ec05261006..0988c901f88 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java
@@ -103,6 +103,7 @@ public class IntArrayRef extends Pointer {
/** equals - Check for element-wise equality. */
public native @Cast("const bool") boolean equals(@ByVal IntArrayRef RHS);
+ public native @Cast("const bool") boolean equals(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int... RHS);
/** slice(n, m) - Take M elements of the array starting at element N */
public native @Const @ByVal IntArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java
new file mode 100644
index 00000000000..ecda6625801
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java
@@ -0,0 +1,40 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request);
+ public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java
new file mode 100644
index 00000000000..6a3021353ba
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java
@@ -0,0 +1,45 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+/**
+ * Abstract class for stateless datasets to be subclassed by Java user code.
+ */
+ @Name("javacpp::Dataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDataset extends JavaDatasetBase {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public JavaDataset() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public JavaDataset(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDataset(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public JavaDataset position(long position) {
+ return (JavaDataset)super.position(position);
+ }
+ @Override public JavaDataset getPointer(long i) {
+ return new JavaDataset((Pointer)this).offsetAddress(i);
+ }
+
+ @Virtual(true) public native @ByVal Example get(@Cast("size_t") long index);
+ @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size();
+ @Virtual public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef indices);
+}
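
This class is the user-facing entry point of the new data-loading support in this patch: Java code subclasses `JavaDataset` and overrides the `@Virtual` methods above. A minimal sketch (the class name, sizes, and tensor contents are illustrative; the `SizeTOptional(long)` value constructor is assumed from the presets' optional adapters). The loader sketches after the data loader classes below reuse this dataset.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

// Illustrative stateless dataset: 8 examples of all-ones tensors.
public class FixedDataset extends JavaDataset {
    @Override public Example get(long index) {
        return new Example(ones(4), ones(1));   // (data, target)
    }
    @Override public SizeTOptional size() {
        return new SizeTOptional(8);            // assumed value constructor
    }
}
```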
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java
new file mode 100644
index 00000000000..745d26d64ac
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java
@@ -0,0 +1,35 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::Dataset,torch::data::Example >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDatasetBase extends JavaBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDatasetBase(Pointer p) { super(p); }
+
+
+ /** Returns the example at the given index. */
+ public native @ByVal @Cast("torch::data::datasets::Dataset,torch::data::Example >::ExampleType*") Example get(@Cast("size_t") long index);
+
+ /** Returns a batch of data.
+ * The default implementation calls {@code get()} for every requested index
+ * in the batch. */
+ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef indices);
+ public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... indices);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java
new file mode 100644
index 00000000000..5b49faf24b9
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedRandomDataLoader extends JavaDistributedRandomDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedRandomDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaDistributedRandomDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal DistributedRandomSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal DistributedRandomSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java
new file mode 100644
index 00000000000..bad54600bdf
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedRandomDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedRandomDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal ExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal ExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
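
For the distributed pair of classes above, a sketch of per-replica sharding, reusing `FixedDataset` from the earlier sketch. The four-argument `DistributedRandomSampler` constructor is assumed to follow the C++ signature `(size, num_replicas, rank, allow_duplicates)`; verify against the generated sampler class before relying on it.

```java
import org.bytedeco.pytorch.*;

public class DistributedLoaderSketch {
    public static void main(String[] args) {
        FixedDataset dataset = new FixedDataset();  // from the sketch above
        // Hypothetical 2-replica sharding: this process (rank 0) sees
        // roughly half of the dataset each epoch.
        DistributedRandomSampler sampler = new DistributedRandomSampler(
                dataset.size().get(), /* num_replicas = */ 2,
                /* rank = */ 0, /* allow_duplicates = */ true);
        JavaDistributedRandomDataLoader loader = new JavaDistributedRandomDataLoader(
                dataset, sampler, new DataLoaderOptions(2));
        loader.join();  // drain worker threads before exiting
    }
}
```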
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java
new file mode 100644
index 00000000000..2e599d8805d
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedRandomTensorDataLoader extends JavaDistributedRandomTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedRandomTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaDistributedRandomTensorDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal DistributedRandomSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal DistributedRandomSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java
new file mode 100644
index 00000000000..eea76157e6b
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedRandomTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedRandomTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java
new file mode 100644
index 00000000000..fc113a06359
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedSequentialDataLoader extends JavaDistributedSequentialDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedSequentialDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaDistributedSequentialDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal DistributedSequentialSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal DistributedSequentialSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java
new file mode 100644
index 00000000000..65dcb5491a2
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedSequentialDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedSequentialDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal ExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal ExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java
new file mode 100644
index 00000000000..734076272d6
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedSequentialTensorDataLoader extends JavaDistributedSequentialTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedSequentialTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaDistributedSequentialTensorDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal DistributedSequentialSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal DistributedSequentialSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java
new file mode 100644
index 00000000000..a364b6ab273
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaDistributedSequentialTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaDistributedSequentialTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java
new file mode 100644
index 00000000000..41843004e94
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaRandomDataLoader extends JavaRandomDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaRandomDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaRandomDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal RandomSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal RandomSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
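
A sketch of the iteration idiom the Javadoc above describes, again reusing `FixedDataset`. Here `access()` and `increment()` are assumed to be the presets' mappings of the iterator's `operator*()` and `operator++()`, following the pattern used elsewhere in these bindings.

```java
import org.bytedeco.pytorch.*;

public class LoaderLoop {
    public static void main(String[] args) {
        FixedDataset dataset = new FixedDataset();  // from the earlier sketch
        JavaRandomDataLoader loader = new JavaRandomDataLoader(
                dataset,
                new RandomSampler(dataset.size().get()),
                new DataLoaderOptions(/* batch_size = */ 2));
        // begin()/end() follow the OutputIterator contract documented above;
        // equals() compares against the sentinel returned by end().
        for (ExampleVectorIterator it = loader.begin(); !it.equals(loader.end()); it = it.increment()) {
            ExampleVector batch = it.access();
            System.out.println("got a batch of " + batch.size() + " examples");
        }
        loader.join();
    }
}
```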
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java
new file mode 100644
index 00000000000..0917169ac6a
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaRandomDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaRandomDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal ExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal ExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java
new file mode 100644
index 00000000000..93127463eb6
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaRandomTensorDataLoader extends JavaRandomTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaRandomTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaRandomTensorDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal RandomSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal RandomSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java
new file mode 100644
index 00000000000..1724e9cb0cf
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaRandomTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaRandomTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java
new file mode 100644
index 00000000000..93ca46f7706
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaSequentialDataLoader extends JavaSequentialDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaSequentialDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaSequentialDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal SequentialSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset,
+ @ByVal SequentialSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java
new file mode 100644
index 00000000000..84f9ca47e46
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaSequentialDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaSequentialDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal ExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal ExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java
new file mode 100644
index 00000000000..f85c25d2369
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaSequentialTensorDataLoader extends JavaSequentialTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaSequentialTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaSequentialTensorDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal SequentialSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset,
+ @ByVal SequentialSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java
new file mode 100644
index 00000000000..d041c78e9bd
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaSequentialTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaSequentialTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java
new file mode 100644
index 00000000000..6dd7491ad16
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java
@@ -0,0 +1,39 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,c10::optional > >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java
new file mode 100644
index 00000000000..260513108ad
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java
@@ -0,0 +1,30 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatefulDataLoader") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulDataLoader extends JavaStatefulDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatefulDataLoader} from a {@code dataset} and some {@code options}. */
+ public JavaStatefulDataLoader(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStatefulDataset dataset, @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, options); }
+ private native void allocate(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStatefulDataset dataset, @ByVal DataLoaderOptions options);
+}
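
The stateful loader drives a `JavaStatefulDataset` (introduced below), where the dataset itself tracks epoch progress and signals exhaustion with an empty optional. The sketch assumes the overridable surface is `get_batch(long)`, `size()`, and `reset()`, per the stateful-dataset contract in these presets, and that the optional/vector adapters expose the usual value constructors; any serialization hooks required by the contract are omitted here.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

// Illustrative stateful dataset: serves a fixed number of batches per
// epoch, then returns an empty optional; reset() rearms it.
public class CountingDataset extends JavaStatefulDataset {
    long remaining = 4;
    @Override public ExampleVectorOptional get_batch(long size) {
        if (remaining <= 0) return new ExampleVectorOptional(); // end of epoch
        remaining--;
        return new ExampleVectorOptional(
                new ExampleVector(new Example(ones(4), ones(1))));
    }
    @Override public SizeTOptional size() { return new SizeTOptional(4); }
    @Override public void reset() { remaining = 4; }

    public static void main(String[] args) {
        JavaStatefulDataLoader loader = new JavaStatefulDataLoader(
                new CountingDataset(), new DataLoaderOptions(1));
        loader.join();
    }
}
```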
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java
new file mode 100644
index 00000000000..ffe7dd7a1f0
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal ExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal ExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
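
For review context, a minimal sketch of how these stateful loader bindings are driven from Java, assuming the generated iterator maps operator++/operator*/operator== to increment()/access()/equals() as elsewhere in these presets, and that DataLoaderOptions exposes the C++ batch-size constructor; RandomStatefulDataset is the illustrative subclass sketched after JavaStatefulDataset.java below:

    import org.bytedeco.pytorch.*;

    public class StatefulLoaderDemo {
        public static void main(String[] args) {
            JavaStatefulDataset dataset = new RandomStatefulDataset(); // illustrative subclass, sketched below
            JavaStatefulDataLoader loader = new JavaStatefulDataLoader(dataset, new DataLoaderOptions(4));
            // OutputIterator semantics: a single forward pass until the sentinel from end()
            for (ExampleVectorIterator it = loader.begin(); !it.equals(loader.end()); it = it.increment()) {
                ExampleVector batch = it.access();
                System.out.println("got a batch of " + batch.size() + " examples");
            }
            loader.join(); // join workers and drain internal queues before tearing down
        }
    }
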
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java
new file mode 100644
index 00000000000..9d95edf2a47
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java
@@ -0,0 +1,47 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+/**
+ * Abstract class for stateful datasets to be subclassed by Java user code.
+ */
+@Name("javacpp::StatefulDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulDataset extends JavaStatefulDatasetBase {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public JavaStatefulDataset() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public JavaStatefulDataset(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulDataset(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public JavaStatefulDataset position(long position) {
+ return (JavaStatefulDataset)super.position(position);
+ }
+ @Override public JavaStatefulDataset getPointer(long i) {
+ return new JavaStatefulDataset((Pointer)this).offsetAddress(i);
+ }
+
+ @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size();
+ @Virtual(true) public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long size);
+ @Virtual(true) public native void reset();
+ @Virtual(true) public native @Const({false, false, true}) void save(@ByRef OutputArchive archive);
+ @Virtual(true) public native void load(@ByRef InputArchive archive);
+}
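
Since this class exists specifically to be subclassed from Java, here is an illustrative sketch of the contract the five @Virtual methods imply. RandomStatefulDataset and its counter are hypothetical, and the SizeTOptional/ExampleVectorOptional constructors and the ExampleVector push_back() helper are assumed to follow JavaCPP's usual optional and vector adapter conventions:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    // Illustrative only: serves `total` random batches per epoch, then signals exhaustion.
    public class RandomStatefulDataset extends JavaStatefulDataset {
        private long served = 0;
        private static final long total = 8;

        @Override public SizeTOptional size() { return new SizeTOptional(total); }

        @Override public ExampleVectorOptional get_batch(long batchSize) {
            if (served >= total) {
                return new ExampleVectorOptional(); // empty optional ends the epoch
            }
            served++;
            ExampleVector batch = new ExampleVector();
            for (long i = 0; i < batchSize; i++) {
                batch.push_back(new Example(rand(3), rand(1))); // random data and target tensors
            }
            return new ExampleVectorOptional(batch);
        }

        @Override public void reset() { served = 0; }                      // called at the start of each epoch
        @Override public void save(OutputArchive archive) { /* persist `served` here if needed */ }
        @Override public void load(InputArchive archive)  { /* restore `served` here if needed */ }
    }
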
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java
new file mode 100644
index 00000000000..185364c9be2
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java
@@ -0,0 +1,34 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::StatefulDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulDatasetBase extends JavaStatefulBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulDatasetBase(Pointer p) { super(p); }
+
+ /** Resets internal state of the dataset. */
+ public native void reset();
+
+ /** Saves the stateful dataset's state to the {@code archive}. */
+ public native void save(@ByRef OutputArchive archive);
+
+ /** Deserializes the stateful dataset's state from the {@code archive}. */
+ public native void load(@ByRef InputArchive archive);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java
new file mode 100644
index 00000000000..c2dcedd1752
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,c10::optional > >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulTensorBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulTensorBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java
new file mode 100644
index 00000000000..1017c21b949
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java
@@ -0,0 +1,30 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatefulDataLoader") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulTensorDataLoader extends JavaStatefulTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatefulDataLoader} from a {@code dataset} and some {@code options}. */
+ public JavaStatefulTensorDataLoader(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStatefulTensorDataset dataset, @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, options); }
+ private native void allocate(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStatefulTensorDataset dataset, @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java
new file mode 100644
index 00000000000..7dd1c91209d
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java
new file mode 100644
index 00000000000..3447db6bad7
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java
@@ -0,0 +1,43 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("javacpp::StatefulDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulTensorDataset extends JavaStatefulTensorDatasetBase {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public JavaStatefulTensorDataset() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public JavaStatefulTensorDataset(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulTensorDataset(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public JavaStatefulTensorDataset position(long position) {
+ return (JavaStatefulTensorDataset)super.position(position);
+ }
+ @Override public JavaStatefulTensorDataset getPointer(long i) {
+ return new JavaStatefulTensorDataset((Pointer)this).offsetAddress(i);
+ }
+
+ @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size();
+ @Virtual(true) public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long size);
+ @Virtual(true) public native void reset();
+ @Virtual(true) public native @Const({false, false, true}) void save(@ByRef OutputArchive archive);
+ @Virtual(true) public native void load(@ByRef InputArchive archive);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java
new file mode 100644
index 00000000000..ad7f026b4fc
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java
@@ -0,0 +1,34 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::StatefulDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStatefulTensorDatasetBase extends JavaStatefulTensorBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStatefulTensorDatasetBase(Pointer p) { super(p); }
+
+ /** Resets internal state of the dataset. */
+ public native void reset();
+
+ /** Saves the stateful dataset's state to the {@code archive}. */
+ public native void save(@ByRef OutputArchive archive);
+
+ /** Deserializes the stateful dataset's state from the {@code archive}. */
+ public native void load(@ByRef InputArchive archive);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java
new file mode 100644
index 00000000000..4069f0a12d7
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal ExampleVector get_batch(@Cast("size_t") long request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java
new file mode 100644
index 00000000000..4ee342eddbe
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamDataLoader extends JavaStreamDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaStreamDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStreamDataset dataset,
+ @ByVal StreamSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStreamDataset dataset,
+ @ByVal StreamSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
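
Unlike the stateful loaders, the stream variants go through torch::data::StatelessDataLoader and therefore also take a sampler. A hedged sketch, assuming StreamSampler's epoch-size constructor is mapped as StreamSampler(long) and DataLoaderOptions exposes the batch-size constructor:

    import org.bytedeco.pytorch.*;

    public class StreamLoaderFactory {
        static JavaStreamDataLoader makeLoader(JavaStreamDataset dataset) {
            // StreamSampler(epoch_size) caps how many examples one epoch draws from the stream;
            // DataLoaderOptions(batch_size) mirrors the corresponding C++ constructor.
            return new JavaStreamDataLoader(dataset, new StreamSampler(64), new DataLoaderOptions(8));
        }
    }
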
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java
new file mode 100644
index 00000000000..d0f82a8bf67
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal ExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal ExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java
new file mode 100644
index 00000000000..7b8e284e74d
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java
@@ -0,0 +1,44 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+/**
+ * Abstract class for stateless stream datasets to be subclassed by Java user code.
+ */
+@Name("javacpp::StreamDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamDataset extends JavaStreamBatchDataset {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public JavaStreamDataset() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public JavaStreamDataset(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamDataset(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public JavaStreamDataset position(long position) {
+ return (JavaStreamDataset)super.position(position);
+ }
+ @Override public JavaStreamDataset getPointer(long i) {
+ return new JavaStreamDataset((Pointer)this).offsetAddress(i);
+ }
+
+ @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size();
+ @Virtual(true) public native @ByVal ExampleVector get_batch(@Cast("size_t") long size);
+}
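
An illustrative subclass matching the two @Virtual methods above; an unsized stream leaves the epoch length to the sampler. RandomStreamDataset is hypothetical, and the optional and vector helpers are assumed to follow JavaCPP's usual adapter conventions:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    // Illustrative endless stream: each get_batch() call materializes batchSize fresh examples.
    public class RandomStreamDataset extends JavaStreamDataset {
        @Override public SizeTOptional size() {
            return new SizeTOptional(); // unsized: the StreamSampler's epoch size bounds iteration instead
        }
        @Override public ExampleVector get_batch(long batchSize) {
            ExampleVector batch = new ExampleVector();
            for (long i = 0; i < batchSize; i++) {
                batch.push_back(new Example(rand(3), rand(1)));
            }
            return batch;
        }
    }
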
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java
new file mode 100644
index 00000000000..4a9d808b8bb
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamTensorBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamTensorBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal TensorExampleVector get_batch(@Cast("size_t") long request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java
new file mode 100644
index 00000000000..0532f11043e
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java
@@ -0,0 +1,37 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamTensorDataLoader extends JavaStreamTensorDataLoaderBase {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamTensorDataLoader(Pointer p) { super(p); }
+
+
+ /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and
+ * some {@code options}. */
+ public JavaStreamTensorDataLoader(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStreamTensorDataset dataset,
+ @ByVal StreamSampler sampler,
+ @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); }
+ private native void allocate(
+ @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStreamTensorDataset dataset,
+ @ByVal StreamSampler sampler,
+ @ByVal DataLoaderOptions options);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java
new file mode 100644
index 00000000000..dd42ea8f3ad
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java
@@ -0,0 +1,57 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamTensorDataLoaderBase extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamTensorDataLoaderBase(Pointer p) { super(p); }
+
+
+ /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options}
+ * to configure the DataLoader with, and a {@code sampler} that specifies the
+ * sampling strategy. */
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+
+ /** Returns an iterator into the DataLoader. The lifetime of the iterator is
+ * bound to the DataLoader. In C++ standards language, the category of the
+ * iterator is {@code OutputIterator}. See
+ * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ * means. In short: you may increment the iterator and dereference it, but
+ * cannot go back, or step forward more than one position at a time. When the
+ * DataLoader is exhausted, it will compare equal with the special
+ * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you
+ * should only use range-for loops to loop over the DataLoader, but
+ * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(),
+ * output_iterator)} are supported too. */
+ public native @ByVal TensorExampleVectorIterator begin();
+
+ /** Returns a special "sentinel" iterator that compares equal with a
+ * non-sentinel iterator once the DataLoader is exhausted. */
+ public native @ByVal TensorExampleVectorIterator end();
+
+ /** Joins the DataLoader's worker threads and drains internal queues.
+ * This function may only be invoked from the main thread (in which the
+ * DataLoader lives). */
+ public native void join();
+
+ /** Returns the options with which the DataLoader was configured. */
+ public native @Const @ByRef @NoException(true) FullDataLoaderOptions options();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java
new file mode 100644
index 00000000000..05860271878
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java
@@ -0,0 +1,40 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("javacpp::StreamDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaStreamTensorDataset extends JavaStreamTensorBatchDataset {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public JavaStreamTensorDataset() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public JavaStreamTensorDataset(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaStreamTensorDataset(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public JavaStreamTensorDataset position(long position) {
+ return (JavaStreamTensorDataset)super.position(position);
+ }
+ @Override public JavaStreamTensorDataset getPointer(long i) {
+ return new JavaStreamTensorDataset((Pointer)this).offsetAddress(i);
+ }
+
+ @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size();
+ @Virtual(true) public native @ByVal TensorExampleVector get_batch(@Cast("size_t") long size);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java
new file mode 100644
index 00000000000..d0aa00f1dcb
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java
@@ -0,0 +1,38 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::BatchDataset,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaTensorBatchDataset extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaTensorBatchDataset(Pointer p) { super(p); }
+
+ @MemberGetter public static native @Cast("const bool") boolean is_stateful();
+ public static final boolean is_stateful = is_stateful();
+
+ /** Returns a batch of data given an index. */
+ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);
+ public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request);
+
+ /** Returns the size of the dataset, or an empty optional if it is unsized. */
+ public native @ByVal SizeTOptional size();
+
+ /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java
new file mode 100644
index 00000000000..35e2f01d5ca
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java
@@ -0,0 +1,41 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+ @Name("javacpp::Dataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaTensorDataset extends JavaTensorDatasetBase {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public JavaTensorDataset() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public JavaTensorDataset(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaTensorDataset(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public JavaTensorDataset position(long position) {
+ return (JavaTensorDataset)super.position(position);
+ }
+ @Override public JavaTensorDataset getPointer(long i) {
+ return new JavaTensorDataset((Pointer)this).offsetAddress(i);
+ }
+
+ @Virtual(true) public native @ByVal TensorExample get(@Cast("size_t") long index);
+ @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size();
+ @Virtual public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef indices);
+}
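
This is the map-style, target-less counterpart: get() and size() are pure virtual, while get_batch() already has a default that calls get() once per requested index, so a subclass only needs the first two. OnesTensorDataset is an illustrative sketch, assuming TensorExample exposes the data-only constructor of torch::data::Example<torch::Tensor, NoTarget>:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    // Illustrative 100-element dataset of constant tensors; the inherited get_batch()
    // gathers one get() result per requested index.
    public class OnesTensorDataset extends JavaTensorDataset {
        @Override public TensorExample get(long index) { return new TensorExample(ones(3)); }
        @Override public SizeTOptional size() { return new SizeTOptional(100); }
    }
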
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java
new file mode 100644
index 00000000000..e0cfc8559a3
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java
@@ -0,0 +1,35 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("torch::data::datasets::Dataset,torch::data::Example >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class JavaTensorDatasetBase extends JavaTensorBatchDataset {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public JavaTensorDatasetBase(Pointer p) { super(p); }
+
+
+ /** Returns the example at the given index. */
+ public native @ByVal @Cast("torch::data::datasets::Dataset,torch::data::Example >::ExampleType*") TensorExample get(@Cast("size_t") long index);
+
+ /** Returns a batch of data.
+ * The default implementation calls {@code get()} for every requested index
+ * in the batch. */
+ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef indices);
+ public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... indices);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java
index 5df5dbd4718..f3f47687f30 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java
@@ -37,6 +37,9 @@ public class JitModule extends JitObject {
private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type);
public JitModule() { super((Pointer)null); allocate(); }
private native void allocate();
+ public JitModule(@Const @ByRef JitModule arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef JitModule arg0);
+ public native @ByRef @Name("operator =") JitModule put(@Const @ByRef JitModule arg0);
public JitModule(
@ByVal QualifiedName arg0,
@SharedPtr CompilationUnit cu,
@@ -219,6 +222,7 @@ public native void _save_for_mobile(
public native @ByVal JitModule copy();
+ public native @ByVal JitModule deepcopy(@ByVal(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @ByVal JitModule deepcopy();
// Clones both the underlying `ClassType` and the module instance(data), this
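
The copy()/deepcopy() pair added here mirrors torch::jit::Module. A hedged sketch of the difference, assuming DeviceOptional and Device follow the usual c10 mappings (module construction elided):

    import org.bytedeco.pytorch.*;

    public class CloneDemo {
        static void clones(JitModule module) {
            JitModule shallow = module.copy();    // new module object; parameter tensors still shared
            JitModule deep = module.deepcopy();   // parameters and buffers duplicated as well
            // the new overload deep-copies straight onto a target device
            JitModule onCpu = module.deepcopy(new DeviceOptional(new Device("cpu")));
        }
    }
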
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java
index 9d2dbfaf87b..ec6d18f9abd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java
@@ -27,13 +27,5 @@ public class JitNodeWrap extends Pointer {
private native void allocate(JitNode p);
public native void clear();
public native JitNode elem(); public native JitNodeWrap elem(JitNode setter);
- public static class Clear_cb_Pointer extends FunctionPointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public Clear_cb_Pointer(Pointer p) { super(p); }
- protected Clear_cb_Pointer() { allocate(); }
- private native void allocate();
- public native void call(Pointer arg0);
- }
- public native Clear_cb_Pointer clear_cb(); public native JitNodeWrap clear_cb(Clear_cb_Pointer setter);
+ public native PointerConsumer clear_cb(); public native JitNodeWrap clear_cb(PointerConsumer setter);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java
index 4dcc937a66b..7c6f00bf376 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java
@@ -18,7 +18,6 @@
import static org.bytedeco.pytorch.global.torch.*;
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
@Name("torch::jit::Object") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class JitObject extends Pointer {
static { Loader.load(); }
@@ -34,7 +33,9 @@ public class JitObject extends Pointer {
public JitObject() { super((Pointer)null); allocate(); }
private native void allocate();
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+ public JitObject(@Const @ByRef JitObject arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef JitObject arg0);
+ public native @ByRef @Name("operator =") JitObject put(@Const @ByRef JitObject arg0);
public JitObject(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue) { super((Pointer)null); allocate(_ivalue); }
private native void allocate(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue);
public JitObject(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java
deleted file mode 100644
index ff7b37f261f..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Lexer extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public Lexer(Pointer p) { super(p); }
-
- public Lexer(@SharedPtr Source source) { super((Pointer)null); allocate(source); }
- private native void allocate(@SharedPtr Source source);
- // Return the current token, and then move to the next one
- public native @ByVal Token next();
- // Skip the current token if it matches the given kind
- public native @Cast("bool") boolean nextIf(int kind);
-
- public native void reportError(@StdString BytePointer what);
- public native void reportError(@StdString String what);
- public native void reportError(@StdString BytePointer what, @Const @ByRef Token t);
- public native void reportError(@StdString String what, @Const @ByRef Token t);
- public native void expected(@StdString BytePointer what, @Const @ByRef Token t);
- public native void expected(@StdString String what, @Const @ByRef Token t);
- public native void expected(@StdString BytePointer what);
- public native void expected(@StdString String what);
- // Check that the current token has a given kind, return the current token,
- // and advance to the next one.
- public native @ByVal Token expect(int kind);
- public native @ByRef Token lookahead();
- public native @ByRef Token cur();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java
index 222672f536b..ad73a59c945 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java
@@ -45,6 +45,7 @@ public class ListType extends ListSingleElementType {
public static native @SharedPtr ListType ofTensors();
public static native @SharedPtr ListType ofOptionalTensors();
public static native @SharedPtr ListType ofInts();
+ public static native @SharedPtr ListType ofSymInts();
public static native @SharedPtr ListType ofFloats();
public static native @SharedPtr ListType ofComplexDoubles();
public static native @SharedPtr ListType ofBools();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java
index 3d957c2bb04..6ddd4b17d82 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java
@@ -105,7 +105,7 @@ public class LongArrayRef extends Pointer {
/** equals - Check for element-wise equality. */
public native @Cast("const bool") boolean equals(@ByVal LongArrayRef RHS);
- public native @Cast("const bool") boolean equals(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... RHS);
+ public native @Cast("const bool") boolean equals(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... RHS);
/** slice(n, m) - Take M elements of the array starting at element N */
public native @Const @ByVal LongArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java
index d5aeec4c537..83ab525139f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java
@@ -23,7 +23,7 @@ public class LongArrayRefOptional extends Pointer {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LongArrayRefOptional(Pointer p) { super(p); }
public LongArrayRefOptional(LongArrayRef value) { this(); put(value); }
- public LongArrayRefOptional(@Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... value) { this(); put(value); }
+ public LongArrayRefOptional(@Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long... value) { this(); put(value); }
public LongArrayRefOptional() { allocate(); }
private native void allocate();
public native @Name("operator =") @ByRef LongArrayRefOptional put(@ByRef LongArrayRefOptional x);
@@ -32,6 +32,6 @@ public class LongArrayRefOptional extends Pointer {
public native void reset();
public native @Name("value") @ByRef LongArrayRef get();
@ValueSetter public native LongArrayRefOptional put(@ByRef LongArrayRef value);
- @ValueSetter public native LongArrayRefOptional put(@ByRef @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... value);
+ @ValueSetter public native LongArrayRefOptional put(@ByRef @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long... value);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java
index 33e4be27673..84a87ff9f2a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java
@@ -46,8 +46,8 @@ public class LongList extends Pointer {
*/
public LongList(@ByVal LongArrayRef initial_values) { super((Pointer)null); allocate(initial_values); }
private native void allocate(@ByVal LongArrayRef initial_values);
- public LongList(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... initial_values) { super((Pointer)null); allocate(initial_values); }
- private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... initial_values);
+ public LongList(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long... initial_values) { super((Pointer)null); allocate(initial_values); }
+ private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long... initial_values);
/**
* Create a generic list with runtime type information.
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java
deleted file mode 100644
index 869e85d05ef..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("std::unordered_map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class LongStringMap extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public LongStringMap(Pointer p) { super(p); }
- public LongStringMap() { allocate(); }
- private native void allocate();
- public native @Name("operator =") @ByRef LongStringMap put(@ByRef LongStringMap x);
-
- public boolean empty() { return size() == 0; }
- public native long size();
-
- public BytePointer front() { return get(0); }
- public BytePointer back() { return get(size() - 1); }
- @Index public native @StdString BytePointer get(@Cast("int64_t") long i);
- public native LongStringMap put(@Cast("int64_t") long i, BytePointer value);
- @ValueSetter @Index public native LongStringMap put(@Cast("int64_t") long i, @StdString String value);
-
- public native void erase(@ByVal Iterator pos);
- public native @ByVal Iterator begin();
- public native @ByVal Iterator end();
- @NoOffset @Name("iterator") public static class Iterator extends Pointer {
- public Iterator(Pointer p) { super(p); }
- public Iterator() { }
-
- public native @Name("operator ++") @ByRef Iterator increment();
- public native @Name("operator ==") boolean equals(@ByRef Iterator it);
- public native @Name("operator *().first") @MemberGetter @Cast("int64_t") long first();
- public native @Name("operator *().second") @MemberGetter @StdString BytePointer second();
- }
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java
index 88f25ea2bd4..dc220822fcd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java
@@ -29,8 +29,8 @@ public class LongVaryingShape extends Pointer {
public LongVaryingShape(@ByVal LongArrayRef vec) { super((Pointer)null); allocate(vec); }
private native void allocate(@ByVal LongArrayRef vec);
- public LongVaryingShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... vec) { super((Pointer)null); allocate(vec); }
- private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... vec);
+ public LongVaryingShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long... vec) { super((Pointer)null); allocate(vec); }
+ private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long... vec);
public LongVaryingShape(@ByVal(nullValue = "c10::optional<size_t>(c10::nullopt)") SizeTOptional size) { super((Pointer)null); allocate(size); }
private native void allocate(@ByVal(nullValue = "c10::optional<size_t>(c10::nullopt)") SizeTOptional size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java
deleted file mode 100644
index 75d2d27fa99..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("c10::ArrayRef