diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java index c1f139207b8..d2df7f53290 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java @@ -44,6 +44,9 @@ private native void allocate( @Const @ByRef AdaptiveAvgPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveAvgPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java index d34878d9424..8c707a3a4c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java @@ -26,6 +26,9 @@ public class AdaptiveAvgPool1dImplBase extends AdaptiveAvgPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveAvgPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveAvgPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveAvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java index a7a1d46cb80..7c478f33035 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveAvgPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveAvgPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java index f85363d029d..76ca6b27103 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java @@ -44,6 +44,9 @@ private native void allocate( @Const @ByRef AdaptiveAvgPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveAvgPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java index 6b77c1d3a6d..c01a2bd86b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java @@ -22,6 +22,9 @@ public class AdaptiveAvgPool2dImplBase extends AdaptiveAvgPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveAvgPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveAvgPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveAvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java index a3156f2ee04..8c9e3a4d4fa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveAvgPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveAvgPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java index 457192c9ff7..582e1dfbb14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java @@ -44,6 +44,9 @@ private native void allocate( @Const @ByRef AdaptiveAvgPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveAvgPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java index 7bf5b5c17f9..2a63c6efc7f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java @@ -22,6 +22,9 @@ public class AdaptiveAvgPool3dImplBase extends AdaptiveAvgPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveAvgPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveAvgPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveAvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java index 00879572dba..1b241bf3fc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveAvgPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveAvgPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java index 68c7a75f4d4..0a190853a4d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java @@ -41,19 +41,22 @@ public class AdaptiveLogSoftmaxWithLossImpl extends AdaptiveLogSoftmaxWithLossIm static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveLogSoftmaxWithLossImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveLogSoftmaxWithLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public AdaptiveLogSoftmaxWithLossImpl( @Cast("int64_t") long in_features, @Cast("int64_t") long n_classes, @ByVal @Cast("std::vector*") LongVector cutoffs) { super((Pointer)null); allocate(in_features, n_classes, cutoffs); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long in_features, @Cast("int64_t") long n_classes, @ByVal @Cast("std::vector*") LongVector cutoffs); public AdaptiveLogSoftmaxWithLossImpl( @ByVal AdaptiveLogSoftmaxWithLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @ByVal AdaptiveLogSoftmaxWithLossOptions options_); public native @ByVal ASMoutput forward(@Const @ByRef Tensor input, @Const @ByRef Tensor target); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java index 6f3714a64ad..50717b9c241 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveLogSoftmaxWithLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveLogSoftmaxWithLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveLogSoftmaxWithLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveLogSoftmaxWithLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java index 50fcd51fa50..72de98433b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java @@ -44,6 +44,9 @@ private native void allocate( @Const @ByRef AdaptiveMaxPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveMaxPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java index d1ab371afa6..e9156a3cca8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java @@ -26,6 +26,9 @@ public class AdaptiveMaxPool1dImplBase extends AdaptiveMaxPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveMaxPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveMaxPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveMaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java index 0c19a51ade2..3cd003b81e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveMaxPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveMaxPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java index 92b48723b63..ebb94c455e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java @@ -44,6 +44,9 @@ private native void allocate( @Const @ByRef AdaptiveMaxPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveMaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java index 6af69576bf0..5d03333cd32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java @@ -22,6 +22,9 @@ public class AdaptiveMaxPool2dImplBase extends AdaptiveMaxPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveMaxPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveMaxPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveMaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java index 4e028df9b2e..8a0a5e03073 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveMaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveMaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java index 07416e8ac99..d97272b86b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java @@ -44,6 +44,9 @@ private native void allocate( @Const @ByRef AdaptiveMaxPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveMaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java index c62ea0ad6d3..08a224d3b39 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java @@ -22,6 +22,9 @@ public class AdaptiveMaxPool3dImplBase extends AdaptiveMaxPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AdaptiveMaxPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveMaxPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveMaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java index 35baff5c140..736da48cb53 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java @@ -22,6 +22,9 @@ public class AdaptiveMaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AdaptiveMaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java index 5c3c61a790b..4077dab85be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java @@ -45,15 +45,9 @@ public class AlphaDropoutImpl extends AlphaDropoutImplBase { private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AlphaDropoutImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public AlphaDropoutImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public AlphaDropoutImpl position(long position) { - return (AlphaDropoutImpl)super.position(position); - } - @Override public AlphaDropoutImpl getPointer(long i) { - return new AlphaDropoutImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public AlphaDropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java index 237ab9acb16..2a8b153e382 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java @@ -23,6 +23,9 @@ public class AlphaDropoutImplBase extends AlphaDropoutImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AlphaDropoutImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AlphaDropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java index d8c39e8b9a3..f9b29d04d21 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java @@ -22,6 +22,9 @@ public class AlphaDropoutImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AlphaDropoutImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AlphaDropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AlphaDropoutImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java index 2414c7ffca2..6da3c43040e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java @@ -120,255 +120,255 @@ public class AnyModule extends Pointer { /** A default-constructed {@code AnyModule} is in an empty state. */ public AnyModule() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + private native void allocate(); /** Constructs an {@code AnyModule} from a {@code shared_ptr} to concrete module object. */ public AnyModule(AdaptiveLogSoftmaxWithLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); public AnyModule(BatchNorm1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); public AnyModule(InstanceNorm1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); public AnyModule(Conv1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); public AnyModule(ConvTranspose1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); public AnyModule(DropoutImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module); public AnyModule(BatchNorm2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); public AnyModule(InstanceNorm2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); public AnyModule(Conv2dImpl module) { 
super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); public AnyModule(ConvTranspose2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); public AnyModule(Dropout2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Dropout2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Dropout2dImpl module); public AnyModule(BatchNorm3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); public AnyModule(InstanceNorm3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); public AnyModule(Conv3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); public AnyModule(ConvTranspose3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); public AnyModule(Dropout3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Dropout3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Dropout3dImpl module); public AnyModule(AlphaDropoutImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); public AnyModule(FeatureAlphaDropoutImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); public AnyModule(CosineSimilarityImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); public AnyModule(PairwiseDistanceImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); public AnyModule(EmbeddingImpl module) { 
super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); public AnyModule(EmbeddingBagImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); public AnyModule(FoldImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); public AnyModule(UnfoldImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); public AnyModule(IdentityImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); public AnyModule(LinearImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); public AnyModule(BilinearImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); public AnyModule(FlattenImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); public AnyModule(UnflattenImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); public AnyModule(L1LossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); public AnyModule(KLDivLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); public AnyModule(MSELossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); public AnyModule(BCELossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); + private native void 
allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); public AnyModule(HingeEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); public AnyModule(MultiMarginLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); public AnyModule(CosineEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); public AnyModule(SmoothL1LossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); public AnyModule(HuberLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); public AnyModule(MultiLabelMarginLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); public AnyModule(SoftMarginLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); public AnyModule(MultiLabelSoftMarginLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); public AnyModule(TripletMarginLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module); public AnyModule(TripletMarginWithDistanceLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); public AnyModule(CTCLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); public AnyModule(PoissonNLLLossImpl module) { super((Pointer)null); 
allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); public AnyModule(MarginRankingLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); public AnyModule(NLLLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); public AnyModule(CrossEntropyLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); public AnyModule(BCEWithLogitsLossImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); public AnyModule(ReflectionPad1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); public AnyModule(ReplicationPad1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); public AnyModule(ConstantPad1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); public AnyModule(AvgPool1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); public AnyModule(MaxPool1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); public AnyModule(AdaptiveAvgPool1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module); public AnyModule(AdaptiveMaxPool1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); public 
AnyModule(MaxUnpool1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); public AnyModule(LPPool1dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); public AnyModule(ReflectionPad2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); public AnyModule(ReplicationPad2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); public AnyModule(ConstantPad2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); public AnyModule(ZeroPad2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); public AnyModule(AvgPool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); public AnyModule(MaxPool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); public AnyModule(AdaptiveAvgPool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); public AnyModule(AdaptiveMaxPool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); public AnyModule(MaxUnpool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); public AnyModule(FractionalMaxPool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl 
module); public AnyModule(LPPool2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); public AnyModule(ReflectionPad3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); public AnyModule(ReplicationPad3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); public AnyModule(ConstantPad3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); public AnyModule(AvgPool3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); public AnyModule(MaxPool3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); public AnyModule(AdaptiveAvgPool3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); public AnyModule(AdaptiveMaxPool3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); public AnyModule(MaxUnpool3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); public AnyModule(FractionalMaxPool3dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module); public AnyModule(RNNImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); public AnyModule(LSTMImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); public 
AnyModule(GRUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); public AnyModule(RNNCellImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); public AnyModule(LSTMCellImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); public AnyModule(GRUCellImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); public AnyModule(PixelShuffleImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); public AnyModule(PixelUnshuffleImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module); public AnyModule(UpsampleImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); public AnyModule(ELUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); public AnyModule(SELUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); public AnyModule(HardshrinkImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); public AnyModule(HardtanhImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); public AnyModule(LeakyReLUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); public AnyModule(LogSigmoidImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) 
LogSigmoidImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module); public AnyModule(SoftmaxImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); public AnyModule(SoftminImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); public AnyModule(LogSoftmaxImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); public AnyModule(Softmax2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); public AnyModule(PReLUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); public AnyModule(ReLUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); public AnyModule(ReLU6Impl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); public AnyModule(RReLUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); public AnyModule(CELUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); public AnyModule(GLUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); public AnyModule(GELUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); public AnyModule(SiLUImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); public AnyModule(MishImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void 
allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); public AnyModule(SigmoidImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); public AnyModule(SoftplusImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); public AnyModule(SoftshrinkImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); public AnyModule(SoftsignImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); public AnyModule(TanhImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module); public AnyModule(TanhshrinkImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); public AnyModule(ThresholdImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ThresholdImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ThresholdImpl module); public AnyModule(MultiheadAttentionImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); public AnyModule(LayerNormImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); public AnyModule(LocalResponseNormImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); public AnyModule(CrossMapLRN2dImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); public AnyModule(GroupNormImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); + private native 
void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); public AnyModule(TransformerEncoderLayerImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); public AnyModule(TransformerDecoderLayerImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); public AnyModule(TransformerEncoderImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); public AnyModule(TransformerDecoderImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); public AnyModule(TransformerImpl module) { super((Pointer)null); allocate(module); } - @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); /** Constructs an {@code AnyModule} from a concrete module object. */ @@ -377,7 +377,7 @@ public class AnyModule extends Pointer { /** Move construction and assignment is allowed, and follows the default * behavior of move for {@code std::unique_ptr}. */ public AnyModule(@ByRef(true) AnyModule arg0) { super((Pointer)null); allocate(arg0); } - @SharedPtr private native void allocate(@ByRef(true) AnyModule arg0); + private native void allocate(@ByRef(true) AnyModule arg0); public native @ByRef @Name("operator =") AnyModule put(@ByRef(true) AnyModule arg0); /** Creates a shallow copy of an {@code AnyModule}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java index 075fa250ec1..ad55213cf9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java @@ -42,6 +42,9 @@ public class AvgPool1dImpl extends AvgPool1dImplBase { private native void allocate(@Const @ByRef AvgPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool1dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AvgPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java index 3aa9edf6487..33be849e598 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java @@ -24,6 +24,9 @@ public class AvgPool1dImplBase extends AvgPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AvgPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java index 6f2000eab8e..b112a912e57 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java @@ -22,6 +22,9 @@ public class AvgPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public AvgPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java index a5737361d37..37ef3455b16 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java @@ -42,6 +42,9 @@ public class AvgPool2dImpl extends AvgPool2dImplBase { private native void allocate(@Const @ByRef AvgPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AvgPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java index 0b3199593a0..96f17955533 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java @@ -22,6 +22,9 @@ public class AvgPool2dImplBase extends AvgPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AvgPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java index 553918506c9..5eb5b53ecde 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java @@ -22,6 +22,9 @@ public class AvgPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public AvgPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java index 91734e0ec15..50bd8feb54e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java @@ -42,6 +42,9 @@ public class AvgPool3dImpl extends AvgPool3dImplBase { private native void allocate(@Const @ByRef AvgPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public AvgPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java index cad6f05eaf7..24e43335001 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java @@ -22,6 +22,9 @@ public class AvgPool3dImplBase extends AvgPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public AvgPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java index 12bbab6da9a..588a5619d20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java @@ -22,6 +22,9 @@ public class AvgPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public AvgPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java index 564e886f125..c02921ab998 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java @@ -37,20 +37,14 @@ public class BCELossImpl extends BCELossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCELossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public BCELossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public BCELossImpl position(long position) { - return (BCELossImpl)super.position(position); - } - @Override public BCELossImpl getPointer(long i) { - return new BCELossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public BCELossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public BCELossImpl(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_); public BCELossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java index b2f1b634de4..1888d0fae8d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java @@ -22,6 +22,9 @@ public class BCELossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCELossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public BCELossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCELossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java index 1b2a45761b4..4ba69c564be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java @@ -41,20 +41,14 @@ public class BCEWithLogitsLossImpl extends BCEWithLogitsLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCEWithLogitsLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public BCEWithLogitsLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public BCEWithLogitsLossImpl position(long position) { - return (BCEWithLogitsLossImpl)super.position(position); - } - @Override public BCEWithLogitsLossImpl getPointer(long i) { - return new BCEWithLogitsLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public BCEWithLogitsLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public BCEWithLogitsLossImpl(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_); public BCEWithLogitsLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java index a29e1ec90b9..d973446ec0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java @@ -22,6 +22,9 @@ public class BCEWithLogitsLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCEWithLogitsLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public BCEWithLogitsLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCEWithLogitsLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java index 0f3e1f86659..4acfd9ca2c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java @@ -43,5 +43,8 @@ public class BatchNorm1dImpl extends BatchNorm1dImplBase { private native void allocate(@Const @ByRef BatchNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java index 29e94e51d8d..2ea865d750b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java @@ -24,6 +24,9 @@ public class BatchNorm1dImplBase extends BatchNorm1dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public BatchNorm1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java index 98ce98583e1..e0507018384 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java @@ -25,6 +25,9 @@ public class BatchNorm1dImplBaseBase extends BatchNorm1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java index 36dbb74cf43..5c6b43b905e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java @@ -22,6 +22,9 @@ public class BatchNorm1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java index ba82168585f..cb7b8c053e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java @@ -43,5 +43,8 @@ public class BatchNorm2dImpl extends BatchNorm2dImplBase { private native void allocate(@Const @ByRef BatchNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public BatchNorm2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java index c6db5f8cb50..426641243c3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java @@ -22,6 +22,9 @@ public class BatchNorm2dImplBase extends BatchNorm2dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java index 29a90b8a370..c6614ea20cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java @@ -22,6 +22,9 @@ public class BatchNorm2dImplBaseBase extends BatchNorm2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java index fdc95618d6a..38c8ae3f695 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java @@ -22,6 +22,9 @@ public class BatchNorm2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public BatchNorm2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java index 6a41698852a..9215a26a7dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java @@ -43,5 +43,8 @@ public class BatchNorm3dImpl extends BatchNorm3dImplBase { private native void allocate(@Const @ByRef BatchNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java index 723f6ad05b9..b6ac9885421 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java @@ -22,6 +22,9 @@ public class BatchNorm3dImplBase extends BatchNorm3dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java index a3e840c0a78..0985221c541 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java @@ -22,6 +22,9 @@ public class BatchNorm3dImplBaseBase extends BatchNorm3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public BatchNorm3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java index 8179041964b..cf0543d90c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java @@ -22,6 +22,9 @@ public class BatchNorm3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public BatchNorm3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java index 29d7981280f..b15ec27613a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java @@ -36,11 +36,14 @@ public class BilinearImpl extends BilinearImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BilinearImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public BilinearImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public BilinearImpl(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in1_features, in2_features, out_features); } - @SharedPtr private native void allocate(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features); public BilinearImpl(@Const @ByRef BilinearOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef BilinearOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef BilinearOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java index 7f7e0c35d7f..091a162181d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java @@ -22,6 +22,9 @@ public class BilinearImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public BilinearImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public BilinearImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BilinearImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java index c835d4f6046..688b9c76ecb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java @@ -63,7 +63,7 @@ public class Blob extends Pointer { /** * Returns a printable typename of the blob. */ - public native @ByVal @Cast("c10::string_view*") @NoException(true) Pointer TypeName(); + public native @StringView @NoException(true) BytePointer TypeName(); /** * \brief Gets the const reference of the stored object. The code checks if diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java index f546816c856..6d71f0f60fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java @@ -36,20 +36,14 @@ public class CELUImpl extends CELUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CELUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CELUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public CELUImpl position(long position) { - return (CELUImpl)super.position(position); - } - @Override public CELUImpl getPointer(long i) { - return new CELUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public CELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public CELUImpl(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_); public CELUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java index 4be1d5ea58e..c59216a151b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java @@ -22,6 +22,9 @@ public class CELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CELUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public CELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java index 9312f372bf7..4270c9a5d31 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java @@ -37,20 +37,14 @@ public class CTCLossImpl extends CTCLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CTCLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CTCLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public CTCLossImpl position(long position) { - return (CTCLossImpl)super.position(position); - } - @Override public CTCLossImpl getPointer(long i) { - return new CTCLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public CTCLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public CTCLossImpl(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_); public CTCLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java index c5ffee98872..ac8a98733a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java @@ -22,6 +22,9 @@ public class CTCLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CTCLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public CTCLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CTCLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java index 4a8bbb3602d..c552c0c1e58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java @@ -29,15 +29,6 @@ public class CompilationUnit extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CompilationUnit(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CompilationUnit(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public CompilationUnit position(long position) { - return (CompilationUnit)super.position(position); - } - @Override public CompilationUnit getPointer(long i) { - return new CompilationUnit((Pointer)this).offsetAddress(i); - } public enum FunctionType { Method(0), Hook(1), PreHook(2); @@ -50,15 +41,15 @@ public enum FunctionType { Method(0), Hook(1), PreHook(2); // constructor that takes a set of functions to compile using the native // resolver public CompilationUnit(@StdString BytePointer source) { super((Pointer)null); allocate(source); } - @SharedPtr private native void allocate(@StdString BytePointer source); + @SharedPtr @Name("std::make_shared") private native void allocate(@StdString BytePointer source); public CompilationUnit(@StdString String source) { super((Pointer)null); allocate(source); } - @SharedPtr private native void allocate(@StdString String source); + @SharedPtr @Name("std::make_shared") private native void allocate(@StdString String source); public CompilationUnit() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByRef @Name("operator =") CompilationUnit put(@ByRef(true) CompilationUnit arg0); public CompilationUnit(@ByRef(true) CompilationUnit arg0) { super((Pointer)null); allocate(arg0); } - @SharedPtr private native void allocate(@ByRef(true) CompilationUnit arg0); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByRef(true) CompilationUnit arg0); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java index def707bac9b..782e41b1441 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java @@ -42,5 +42,8 @@ public class ConstantPad1dImpl extends ConstantPad1dImplBase { private native void allocate(@Const @ByRef ConstantPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConstantPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java index bf512337f1b..56b26c87a45 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java @@ -26,6 +26,9 @@ public class ConstantPad1dImplBase extends ConstantPad1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConstantPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java index 77c533320e3..5c256410f98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java @@ -22,6 +22,9 @@ public class ConstantPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConstantPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java index 47c6f1939c4..0924aca5821 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java @@ -42,5 +42,8 @@ public class ConstantPad2dImpl extends ConstantPad2dImplBase { private native void allocate(@Const @ByRef ConstantPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConstantPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java index 3e01e544152..2e0694a3caf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java @@ -22,6 +22,9 @@ public class ConstantPad2dImplBase extends ConstantPad2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConstantPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java index a1c7bdb5453..5329ebc4b48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java @@ -22,6 +22,9 @@ public class ConstantPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConstantPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java index 65b166530e7..225ce97ebcd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java @@ -42,5 +42,8 @@ public class ConstantPad3dImpl extends ConstantPad3dImplBase { private native void allocate(@Const @ByRef ConstantPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConstantPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java index 4dccffffbec..c7ac138c637 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java @@ -22,6 +22,9 @@ public class ConstantPad3dImplBase extends ConstantPad3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConstantPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java index bdf421014ad..d48a26576a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java @@ -22,6 +22,9 @@ public class ConstantPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConstantPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java index 20383cafdc0..d656f24ec35 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java @@ -22,19 +22,18 @@ @Namespace("c10::ivalue") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConstantString extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ConstantString(Pointer p) { super(p); } public ConstantString(@StdString BytePointer str) { super((Pointer)null); allocate(str); } private native void allocate(@StdString BytePointer str); public ConstantString(@StdString String str) { super((Pointer)null); allocate(str); } private native void allocate(@StdString String str); - public ConstantString(@ByVal @Cast("c10::string_view*") Pointer str) { super((Pointer)null); allocate(str); } - private native void allocate(@ByVal @Cast("c10::string_view*") Pointer str); public static native @ByVal ConstantStringPtr create(@StdString BytePointer str_); public static native @ByVal ConstantStringPtr create(@StdString String str_); - public static native @ByVal ConstantStringPtr create(@ByVal @Cast("c10::string_view*") Pointer str_); public native @StdString BytePointer string(); - public native @ByVal @Cast("c10::string_view*") Pointer string_view(); + public native @StringView BytePointer string_view(); public native @Const @ByRef @Name("operator const std::string&") @StdString @Override String toString(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java index dc114eb7b9d..62169c23401 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java @@ -177,7 +177,8 @@ public class Context extends Pointer { // } // Throws an error if `Context::deterministicAlgorithms()` is true - public static native void alertNotDeterministic(@Cast("const c10::string_view*") @ByRef Pointer caller); + public static native void alertNotDeterministic(@StringView BytePointer caller); + public static native void alertNotDeterministic(@StringView String caller); // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java index 2aa9590600f..988d39bacab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java @@ -36,16 +36,19 @@ public class Conv1dImpl extends Conv1dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public Conv1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv1dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public Conv1dImpl(@ByVal Conv1dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal Conv1dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal Conv1dOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java index 60060d92854..1ff44b1a462 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java @@ -24,6 +24,9 @@ public class Conv1dImplBase extends Conv1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv1dImplBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv1dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java index f6917c76891..bb51e38126c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java @@ -22,6 +22,9 @@ public class Conv1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java index fa90e74e409..42eb61b4650 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java @@ -36,16 +36,19 @@ public class Conv2dImpl extends Conv2dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Conv2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv2dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public Conv2dImpl(@ByVal Conv2dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal Conv2dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal Conv2dOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java index 20281a5c589..938f4c099ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java @@ -22,6 +22,9 @@ public class Conv2dImplBase extends Conv2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv2dImplBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv2dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java index cd3f0d8e838..5189bc9b835 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java @@ -22,6 +22,9 @@ public class Conv2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public Conv2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java index 0bfafc5d10d..f80d2b31aae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java @@ -36,16 +36,19 @@ public class Conv3dImpl extends Conv3dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv3dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public Conv3dImpl(@ByVal Conv3dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal Conv3dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal Conv3dOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java index f463912dadf..829fe75ce14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java @@ -22,6 +22,9 @@ public class Conv3dImplBase extends Conv3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv3dImplBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv3dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java index d509cc82c7e..506f02ecd32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java @@ -22,6 +22,9 @@ public class Conv3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Conv3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public Conv3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java index 5230849fdd2..6e9ee6bc8e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java @@ -38,17 +38,20 @@ public class ConvTranspose1dImpl extends ConvTranspose1dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose1dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public ConvTranspose1dImpl(@ByVal ConvTranspose1dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal ConvTranspose1dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose1dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java index fd7c3877547..6bb4b2796a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java @@ -30,6 +30,9 @@ public class ConvTranspose1dImplBase extends ConvTranspose1dImplBaseBase { private native void allocate(@ByVal DetailConv1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java index 0668559692f..e5b84fc48e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java @@ -22,6 +22,9 @@ public class ConvTranspose1dImplBaseBase extends ConvTranspose1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose1dImplBaseBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv1dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java index 4af16c9fc40..6fddabec951 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java @@ -22,6 +22,9 @@ public class ConvTranspose1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java index 600b7daa48a..bc9946eddc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java @@ -38,17 +38,20 @@ public class ConvTranspose2dImpl extends ConvTranspose2dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConvTranspose2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose2dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public ConvTranspose2dImpl(@ByVal ConvTranspose2dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal ConvTranspose2dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose2dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java index 94183c0acda..44fc8f0fad0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java @@ -26,6 +26,9 @@ public class ConvTranspose2dImplBase extends ConvTranspose2dImplBaseBase { private native void allocate(@ByVal DetailConv2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java index bd712c2287e..8944d9806f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java @@ -22,6 +22,9 @@ public class ConvTranspose2dImplBaseBase extends ConvTranspose2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConvTranspose2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose2dImplBaseBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv2dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java index a4c094051c8..e58666c7744 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java @@ -22,6 +22,9 @@ public class ConvTranspose2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java index e1aaefd843c..91e1f635d3e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java @@ -38,17 +38,20 @@ public class ConvTranspose3dImpl extends ConvTranspose3dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConvTranspose3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose3dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public ConvTranspose3dImpl(@ByVal ConvTranspose3dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal ConvTranspose3dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose3dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java index 52c64d0639e..4a5f29d8575 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java @@ -26,6 +26,9 @@ public class ConvTranspose3dImplBase extends ConvTranspose3dImplBaseBase { private native void allocate(@ByVal DetailConv3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java index 362c4a65668..3d47e8583b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java @@ -22,6 +22,9 @@ public class ConvTranspose3dImplBaseBase extends ConvTranspose3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ConvTranspose3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose3dImplBaseBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv3dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java index 27e987e1ac7..ba479272beb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java @@ -22,6 +22,9 @@ public class ConvTranspose3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ConvTranspose3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java index 90300641499..68162e85f92 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java @@ -41,20 +41,14 @@ public class CosineEmbeddingLossImpl extends CosineEmbeddingLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CosineEmbeddingLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CosineEmbeddingLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public CosineEmbeddingLossImpl position(long position) { - return (CosineEmbeddingLossImpl)super.position(position); - } - @Override public CosineEmbeddingLossImpl getPointer(long i) { - return new CosineEmbeddingLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public CosineEmbeddingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public CosineEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_); public CosineEmbeddingLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java index c7eb4c304f5..2c247cc1a07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java @@ -22,6 +22,9 @@ public class CosineEmbeddingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CosineEmbeddingLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public CosineEmbeddingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineEmbeddingLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java index 3e69b1f49e4..7ec08e848b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java @@ -35,20 +35,14 @@ public class CosineSimilarityImpl extends CosineSimilarityImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CosineSimilarityImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CosineSimilarityImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public CosineSimilarityImpl position(long position) { - return (CosineSimilarityImpl)super.position(position); - } - @Override public CosineSimilarityImpl getPointer(long i) { - return new CosineSimilarityImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public CosineSimilarityImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public CosineSimilarityImpl(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_); public CosineSimilarityImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java index b0b8f8bf895..a3a0132e4cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java @@ -22,6 +22,9 @@ public class CosineSimilarityImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CosineSimilarityImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public CosineSimilarityImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineSimilarityImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java index dc200e5cfd1..360c02a60f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java @@ -39,20 +39,14 @@ public class CrossEntropyLossImpl extends CrossEntropyLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CrossEntropyLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CrossEntropyLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public CrossEntropyLossImpl position(long position) { - return (CrossEntropyLossImpl)super.position(position); - } - @Override public CrossEntropyLossImpl getPointer(long i) { - return new CrossEntropyLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public CrossEntropyLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public CrossEntropyLossImpl(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_); public CrossEntropyLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java index 4e7caf7d2b8..7917db15c33 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java @@ -22,6 +22,9 @@ public class CrossEntropyLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CrossEntropyLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public CrossEntropyLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossEntropyLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java index da3d6e50792..92b9da9cd69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java @@ -32,11 +32,14 @@ public class CrossMapLRN2dImpl extends CrossMapLRN2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CrossMapLRN2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public CrossMapLRN2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public CrossMapLRN2dImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } - @SharedPtr private native void allocate(@Cast("int64_t") long size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long size); public CrossMapLRN2dImpl(@Const @ByRef CrossMapLRN2dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef CrossMapLRN2dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef CrossMapLRN2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java index 3e4905298ef..de992794b42 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java @@ -22,6 +22,9 @@ public class CrossMapLRN2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CrossMapLRN2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public CrossMapLRN2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossMapLRN2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java index feef124b7a1..4637fade03c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java @@ -45,15 +45,9 @@ public class Dropout2dImpl extends Dropout2dImplBase { private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dropout2dImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Dropout2dImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public Dropout2dImpl position(long position) { - return (Dropout2dImpl)super.position(position); - } - @Override public Dropout2dImpl getPointer(long i) { - return new Dropout2dImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public Dropout2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java index 7cbb9abdb2b..288fbd29ca7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java @@ -23,6 +23,9 @@ public class Dropout2dImplBase extends Dropout2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dropout2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public Dropout2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Dropout2dImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java index 2b01bdc4424..e77aabe6478 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java @@ -22,6 +22,9 @@ public class Dropout2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dropout2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public Dropout2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Dropout2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java index 0bed3a64b85..57c3ec6879f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java @@ -45,15 +45,9 @@ public class Dropout3dImpl extends Dropout3dImplBase { private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dropout3dImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Dropout3dImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public Dropout3dImpl position(long position) { - return (Dropout3dImpl)super.position(position); - } - @Override public Dropout3dImpl getPointer(long i) { - return new Dropout3dImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public Dropout3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java index e016f8a10a0..88f1564edf1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java @@ -23,6 +23,9 @@ public class Dropout3dImplBase extends Dropout3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dropout3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public Dropout3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Dropout3dImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java index 34280bd1b62..61b44ca9de9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java @@ -22,6 +22,9 @@ public class Dropout3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dropout3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public Dropout3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Dropout3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java index 2ed4626dddf..f6dcb628771 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java @@ -45,15 +45,9 @@ public class DropoutImpl extends DropoutImplBase { private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DropoutImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public DropoutImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public DropoutImpl position(long position) { - return (DropoutImpl)super.position(position); - } - @Override public DropoutImpl getPointer(long i) { - return new DropoutImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public DropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java index 12321edffc3..2d8732c65e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java @@ -23,6 +23,9 @@ public class DropoutImplBase extends DropoutImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DropoutImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public DropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public DropoutImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java index 5243b179e30..733572281d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java @@ -22,6 +22,9 @@ public class DropoutImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DropoutImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public DropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr DropoutImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java index 33120d24748..be88075573f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java @@ -36,20 +36,14 @@ public class ELUImpl extends ELUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ELUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ELUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ELUImpl position(long position) { - return (ELUImpl)super.position(position); - } - @Override public ELUImpl getPointer(long i) { - return new ELUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public ELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ELUImpl(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_); public ELUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java index 3ac4f7f3853..6f0a55f07a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java @@ -22,6 +22,9 @@ public class ELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ELUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java index c7a9f2cb2a1..051df033d58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java @@ -39,11 +39,14 @@ public class EmbeddingBagImpl extends EmbeddingBagImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EmbeddingBagImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public EmbeddingBagImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public EmbeddingBagImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); } - @SharedPtr private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); public EmbeddingBagImpl(@ByVal EmbeddingBagOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal EmbeddingBagOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal EmbeddingBagOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java index 953e07cee99..04ee33f9a59 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java @@ -22,6 +22,9 @@ public class EmbeddingBagImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EmbeddingBagImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public EmbeddingBagImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingBagImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java index 46a68df8f0c..ba7108e44b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java @@ -38,11 +38,14 @@ public class EmbeddingImpl extends EmbeddingImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EmbeddingImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public EmbeddingImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public EmbeddingImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); } - @SharedPtr private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); public EmbeddingImpl(@ByVal EmbeddingOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal EmbeddingOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal EmbeddingOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java index ef121bb9dea..3544cb42a76 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java @@ -22,6 +22,9 @@ public class EmbeddingImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EmbeddingImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public EmbeddingImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java index 7465d0abfe1..958be29699c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java @@ -42,15 +42,9 @@ public class FeatureAlphaDropoutImpl extends FeatureAlphaDropoutImplBase { private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureAlphaDropoutImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public FeatureAlphaDropoutImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public FeatureAlphaDropoutImpl position(long position) { - return (FeatureAlphaDropoutImpl)super.position(position); - } - @Override public FeatureAlphaDropoutImpl getPointer(long i) { - return new FeatureAlphaDropoutImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public FeatureAlphaDropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java index f99b80c4d67..5ad59291e97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java @@ -23,6 +23,9 @@ public class FeatureAlphaDropoutImplBase extends FeatureAlphaDropoutImplCloneabl static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureAlphaDropoutImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public FeatureAlphaDropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public FeatureAlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java index 250de2c87de..544c4b2955c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java @@ -22,6 +22,9 @@ public class FeatureAlphaDropoutImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureAlphaDropoutImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public FeatureAlphaDropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FeatureAlphaDropoutImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java index ce3e26d90ef..53ad95db882 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java @@ -36,20 +36,14 @@ public class FlattenImpl extends FlattenImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FlattenImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public FlattenImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public FlattenImpl position(long position) { - return (FlattenImpl)super.position(position); - } - @Override public FlattenImpl getPointer(long i) { - return new FlattenImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public FlattenImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public FlattenImpl(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_); public FlattenImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java index db95e788eef..67179490c8e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java @@ -22,6 +22,9 @@ public class FlattenImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FlattenImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public FlattenImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FlattenImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java index f9fd42c89f2..f503bff3ea0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java @@ -35,11 +35,14 @@ public class FoldImpl extends FoldImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FoldImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public FoldImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public FoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(output_size, kernel_size); } - @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public FoldImpl(@Const @ByRef FoldOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef FoldOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef FoldOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java index 7d7c51ec54c..2d4aaba4f52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java @@ -22,6 +22,9 @@ public class FoldImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FoldImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public FoldImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FoldImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java index 7edc4eeb4e9..eb0e4846e39 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java @@ -37,11 +37,14 @@ public class FractionalMaxPool2dImpl extends FractionalMaxPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FractionalMaxPool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public FractionalMaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public FractionalMaxPool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public FractionalMaxPool2dImpl(@ByVal FractionalMaxPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal FractionalMaxPool2dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal FractionalMaxPool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java index f5fb60d7f84..4c604762b97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java @@ -22,6 +22,9 @@ public class FractionalMaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FractionalMaxPool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public FractionalMaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java index 3dbd532e3a1..46cad5c500b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java @@ -37,11 +37,14 @@ public class FractionalMaxPool3dImpl extends FractionalMaxPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FractionalMaxPool3dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public FractionalMaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public FractionalMaxPool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public FractionalMaxPool3dImpl(@ByVal FractionalMaxPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal FractionalMaxPool3dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal FractionalMaxPool3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java index aaef5baf793..3a06dfa442a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java @@ -22,6 +22,9 @@ public class FractionalMaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FractionalMaxPool3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public FractionalMaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java index 3c07618ec99..6e6f07a1e56 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java @@ -28,7 +28,7 @@ public class Function extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Function(Pointer p) { super(p); } - public native @ByVal @Cast("c10::string_view*") Pointer doc_string(); + public native @StringView BytePointer doc_string(); public native @Cast("bool") boolean isGraphFunction(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java index a2f6424e7ce..0a44a739ebf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java @@ -185,7 +185,8 @@ private native void allocate( public native @Cast("bool") boolean is_aliasing(@Const @ByRef SchemaArgument argument); public native @Cast("bool") boolean is_mutable(); public native @Cast("bool") boolean is_mutable(@Const @ByRef SchemaArgument argument); - public native @Cast("bool") boolean is_mutable(@ByVal @Cast("c10::string_view*") Pointer name); + public native @Cast("bool") boolean is_mutable(@StringView BytePointer name); + public native @Cast("bool") boolean is_mutable(@StringView String name); // Returns whether lhs and rhs may alias directly. // This does not account for cases where lhs or rhs are a container that @@ -218,7 +219,8 @@ private native void allocate( public native @StdVector Argument getCorrectList(SchemaArgType type); public native @StdVector Argument getCorrectList(@Cast("c10::SchemaArgType") int type); - public native @ByVal IntOptional argumentIndexWithName(@ByVal @Cast("c10::string_view*") Pointer name); + public native @ByVal IntOptional argumentIndexWithName(@StringView BytePointer name); + public native @ByVal IntOptional argumentIndexWithName(@StringView String name); public native @ByVal FunctionSchema cloneWithName(@StdString BytePointer name, @StdString BytePointer overload_name); public native @ByVal FunctionSchema cloneWithName(@StdString String name, @StdString String overload_name); public native @ByVal FunctionSchema cloneWithArguments(@StdVector Argument new_arguments); @@ -261,7 +263,7 @@ private native void allocate( public native void setAliasAnalysis(AliasAnalysisKind v); public native void setAliasAnalysis(@Cast("c10::AliasAnalysisKind") byte v); - public native @ByVal @Cast("c10::optional*") Pointer getNamespace(); + public native @ByVal StringViewOptional getNamespace(); // Returns true if we successfully set the namespace (as there // was none set, and false otherwise) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java index d6da93bfb8d..57abeb06864 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java @@ -28,20 +28,14 @@ public class GELUImpl extends GELUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GELUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public GELUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public GELUImpl position(long position) { - return (GELUImpl)super.position(position); - } - @Override public GELUImpl getPointer(long i) { - return new GELUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public GELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public GELUImpl(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_); public GELUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java index 3d8061d04a1..6bf9884fb1d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java @@ -22,6 +22,9 @@ public class GELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GELUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public GELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java index 4f49d330f6e..32a1e6a99d2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java @@ -36,20 +36,14 @@ public class GLUImpl extends GLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GLUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public GLUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public GLUImpl position(long position) { - return (GLUImpl)super.position(position); - } - @Override public GLUImpl getPointer(long i) { - return new GLUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public GLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public GLUImpl(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_); public GLUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java index 07edf9dc8ca..5122010386e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java @@ -22,6 +22,9 @@ public class GLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GLUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public GLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java index b79e6521857..fc06ebeb8b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java @@ -37,11 +37,14 @@ public class GRUCellImpl extends GRUCellImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUCellImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public GRUCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public GRUCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public GRUCellImpl(@Const @ByRef GRUCellOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef GRUCellOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef GRUCellOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java index 2a094ae44bf..81821e24553 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java @@ -22,6 +22,9 @@ public class GRUCellImplBase extends GRUCellImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUCellImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public GRUCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public GRUCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java index eef3d754108..6545f0ccc67 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java @@ -22,6 +22,9 @@ public class GRUCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUCellImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public GRUCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUCellImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java index 37696eff60e..ea7c56217f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java @@ -37,11 +37,14 @@ public class GRUImpl extends GRUImplBase { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public GRUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public GRUImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public GRUImpl(@Const @ByRef GRUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef GRUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef GRUOptions options_); public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java index d5b5dfe42ec..d6d53f9cb20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java @@ -22,6 +22,9 @@ public class GRUImplBase extends GRUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public GRUImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public GRUImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java index 045548107cd..af4db7a9741 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java @@ -22,6 +22,9 @@ public class GRUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public GRUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java index f2b23283c78..fac88eff6ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java @@ -21,22 +21,13 @@ @Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Graph extends Pointer { static { Loader.load(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Graph(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public Graph position(long position) { - return (Graph)super.position(position); - } - @Override public Graph getPointer(long i) { - return new Graph((Pointer)this).offsetAddress(i); - } public Graph(@ByVal(nullValue = "torch::jit::ScopePtr(c10::make_intrusive())") @Cast("torch::jit::ScopePtr*") Pointer scope_root) { super((Pointer)null); allocate(scope_root); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::jit::ScopePtr(c10::make_intrusive())") @Cast("torch::jit::ScopePtr*") Pointer scope_root); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::jit::ScopePtr(c10::make_intrusive())") @Cast("torch::jit::ScopePtr*") Pointer scope_root); public Graph() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal ValueArrayRef inputs(); public native @ByVal ValueArrayRef outputs(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java index 1bc8dbefd25..951561758e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java @@ -37,11 +37,14 @@ public class GroupNormImpl extends GroupNormImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GroupNormImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public GroupNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public GroupNormImpl(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels) { super((Pointer)null); allocate(num_groups, num_channels); } - @SharedPtr private native void allocate(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels); public GroupNormImpl(@Const @ByRef GroupNormOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef GroupNormOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef GroupNormOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java index 529245fffc8..fded480fc93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java @@ -22,6 +22,9 @@ public class GroupNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GroupNormImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public GroupNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GroupNormImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java index d721a4481ac..947b3dd5bd1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java @@ -36,20 +36,14 @@ public class HardshrinkImpl extends HardshrinkImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HardshrinkImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public HardshrinkImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public HardshrinkImpl position(long position) { - return (HardshrinkImpl)super.position(position); - } - @Override public HardshrinkImpl getPointer(long i) { - return new HardshrinkImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public HardshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public HardshrinkImpl(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_); public HardshrinkImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java index 3bbda28cfc1..437ad825764 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java @@ -22,6 +22,9 @@ public class HardshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HardshrinkImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public HardshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardshrinkImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java index 76bc777bdbf..878b4248a61 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java @@ -37,20 +37,14 @@ public class HardtanhImpl extends HardtanhImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HardtanhImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public HardtanhImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public HardtanhImpl position(long position) { - return (HardtanhImpl)super.position(position); - } - @Override public HardtanhImpl getPointer(long i) { - return new HardtanhImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public HardtanhImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public HardtanhImpl(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_); public HardtanhImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java index bdb15e3647f..944f51c75b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java @@ -22,6 +22,9 @@ public class HardtanhImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HardtanhImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public HardtanhImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardtanhImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java index 0255c887079..c754383595f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java @@ -39,20 +39,14 @@ public class HingeEmbeddingLossImpl extends HingeEmbeddingLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HingeEmbeddingLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public HingeEmbeddingLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public HingeEmbeddingLossImpl position(long position) { - return (HingeEmbeddingLossImpl)super.position(position); - } - @Override public HingeEmbeddingLossImpl getPointer(long i) { - return new HingeEmbeddingLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public HingeEmbeddingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public HingeEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_); public HingeEmbeddingLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java index a6aa3a93d60..fb1dc84af74 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java @@ -22,6 +22,9 @@ public class HingeEmbeddingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HingeEmbeddingLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public HingeEmbeddingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HingeEmbeddingLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java index 059af1815d3..33ef569d82c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java @@ -38,20 +38,14 @@ public class HuberLossImpl extends HuberLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HuberLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public HuberLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public HuberLossImpl position(long position) { - return (HuberLossImpl)super.position(position); - } - @Override public HuberLossImpl getPointer(long i) { - return new HuberLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public HuberLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public HuberLossImpl(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_); public HuberLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java index adf692cb26e..1e99317506d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java @@ -22,6 +22,9 @@ public class HuberLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HuberLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public HuberLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HuberLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index 923609a90b9..fef627f9ae0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -285,7 +285,7 @@ public class IValue extends Pointer { public native @ByVal @Name("toString") ConstantStringPtr toConstantString(); public native @StdString BytePointer toStringRef(); public native @ByVal @Cast("c10::optional >*") Pointer toOptionalStringRef(); - public native @ByVal @Cast("c10::string_view*") Pointer toStringView(); + public native @StringView BytePointer toStringView(); // DoubleList public native @Cast("bool") boolean isDoubleList(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java index 80c8e6de2cd..cb6d7843169 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java @@ -28,18 +28,12 @@ public class IdentityImpl extends IdentityImplCloneable { static { Loader.load(); } /** Default native constructor. */ public IdentityImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public IdentityImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public IdentityImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public IdentityImpl position(long position) { - return (IdentityImpl)super.position(position); - } - @Override public IdentityImpl getPointer(long i) { - return new IdentityImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. */ + public IdentityImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java index d3138e250e4..8976d900293 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java @@ -22,6 +22,9 @@ public class IdentityImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IdentityImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public IdentityImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr IdentityImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java index 68f4e8b9fa1..358066c2d56 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java @@ -43,5 +43,8 @@ public class InstanceNorm1dImpl extends InstanceNorm1dImplBase { private native void allocate(@Const @ByRef InstanceNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java index 92ca6ec7905..715b6e1fd0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java @@ -24,6 +24,9 @@ public class InstanceNorm1dImplBase extends InstanceNorm1dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public InstanceNorm1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java index 555f456c535..aed8025b7ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java @@ -22,6 +22,9 @@ public class InstanceNorm1dImplBaseBase extends InstanceNorm1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java index ddd021dade4..20feddca1b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java @@ -22,6 +22,9 @@ public class InstanceNorm1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java index 44131b2b8ea..77e498e1156 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java @@ -43,5 +43,8 @@ public class InstanceNorm2dImpl extends InstanceNorm2dImplBase { private native void allocate(@Const @ByRef InstanceNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public InstanceNorm2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java index d753c04ee71..120364ac626 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java @@ -22,6 +22,9 @@ public class InstanceNorm2dImplBase extends InstanceNorm2dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java index ca1f82de43c..2eb1a0aa639 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java @@ -22,6 +22,9 @@ public class InstanceNorm2dImplBaseBase extends InstanceNorm2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java index c1788b33b40..0d82eddf72b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java @@ -22,6 +22,9 @@ public class InstanceNorm2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public InstanceNorm2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java index 4de31c198c4..20c8cb55079 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java @@ -43,5 +43,8 @@ public class InstanceNorm3dImpl extends InstanceNorm3dImplBase { private native void allocate(@Const @ByRef InstanceNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java index 52ce402ef11..557e80dc782 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java @@ -22,6 +22,9 @@ public class InstanceNorm3dImplBase extends InstanceNorm3dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java index ff13601e6ea..ef56a5e7367 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java @@ -22,6 +22,9 @@ public class InstanceNorm3dImplBaseBase extends InstanceNorm3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImplBaseBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public InstanceNorm3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java index 0fabb125a3a..73ad5f95c0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java @@ -22,6 +22,9 @@ public class InstanceNorm3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public InstanceNorm3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java index e5697d664b8..30c74f7e393 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java @@ -37,20 +37,14 @@ public class KLDivLossImpl extends KLDivLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public KLDivLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public KLDivLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public KLDivLossImpl position(long position) { - return (KLDivLossImpl)super.position(position); - } - @Override public KLDivLossImpl getPointer(long i) { - return new KLDivLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public KLDivLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public KLDivLossImpl(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_); public KLDivLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java index 5304cc14566..56ab93a1958 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java @@ -22,6 +22,9 @@ public class KLDivLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public KLDivLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public KLDivLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr KLDivLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java index aa9134366b7..4f2ab7c6a08 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java @@ -37,20 +37,14 @@ public class L1LossImpl extends L1LossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public L1LossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public L1LossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public L1LossImpl position(long position) { - return (L1LossImpl)super.position(position); - } - @Override public L1LossImpl getPointer(long i) { - return new L1LossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public L1LossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public L1LossImpl(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_); public L1LossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java index 63f3520178d..3b737fc54d0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java @@ -22,6 +22,9 @@ public class L1LossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public L1LossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public L1LossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr L1LossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java index d331471f8c0..3c893c9793d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java @@ -42,6 +42,9 @@ public class LPPool1dImpl extends LPPool1dImplBase { private native void allocate(@Const @ByRef LPPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public LPPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java index 542f496814f..7d3987055ba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java @@ -26,6 +26,9 @@ public class LPPool1dImplBase extends LPPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public LPPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LPPool1dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java index 8ef91cb134e..7a24950b48b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java @@ -22,6 +22,9 @@ public class LPPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LPPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java index d0e9d6dd967..40e4f58d597 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java @@ -43,6 +43,9 @@ public class LPPool2dImpl extends LPPool2dImplBase { private native void allocate(@Const @ByRef LPPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public LPPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java index 24d6d3509ef..78c78ca0047 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java @@ -22,6 +22,9 @@ public class LPPool2dImplBase extends LPPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public LPPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LPPool2dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java index 0fea8e6edb1..8e53c9b9b9b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java @@ -22,6 +22,9 @@ public class LPPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LPPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java index 049ca25b277..8c6540a2a6d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java @@ -37,11 +37,14 @@ public class LSTMCellImpl extends LSTMCellImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public LSTMCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LSTMCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public LSTMCellImpl(@Const @ByRef LSTMCellOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef LSTMCellOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef LSTMCellOptions options_); public native @ByVal T_TensorTensor_T forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java index 70f15bcdd02..f3c853acdb7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java @@ -22,6 +22,9 @@ public class LSTMCellImplBase extends LSTMCellImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public LSTMCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LSTMCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java index a39748328cb..0136f7f61b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java @@ -22,6 +22,9 @@ public class LSTMCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LSTMCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMCellImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java index 5416c888b4f..ba468fb6880 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java @@ -37,11 +37,14 @@ public class LSTMImpl extends LSTMImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LSTMImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public LSTMImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LSTMImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public LSTMImpl(@Const @ByRef LSTMOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef LSTMOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef LSTMOptions options_); public native @ByVal T_TensorT_TensorTensor_T_T forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java index 7a47b1b97f5..7511ce10912 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java @@ -22,6 +22,9 @@ public class LSTMImplBase extends LSTMImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public LSTMImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LSTMImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java index 851d770ae07..87b3bb4ba52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java @@ -22,6 +22,9 @@ public class LSTMImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LSTMImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java index cc9f77c603b..5e57171a825 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java @@ -38,11 +38,14 @@ public class LayerNormImpl extends LayerNormImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LayerNormImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public LayerNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LayerNormImpl(@ByVal @Cast("std::vector*") LongVector normalized_shape) { super((Pointer)null); allocate(normalized_shape); } - @SharedPtr private native void allocate(@ByVal @Cast("std::vector*") LongVector normalized_shape); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("std::vector*") LongVector normalized_shape); public LayerNormImpl(@ByVal LayerNormOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal LayerNormOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal LayerNormOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java index 9b156c3722c..0f70ea043de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java @@ -22,6 +22,9 @@ public class LayerNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LayerNormImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LayerNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LayerNormImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java index 1422bc42d11..7e037ae9418 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java @@ -36,20 +36,14 @@ public class LeakyReLUImpl extends LeakyReLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LeakyReLUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public LeakyReLUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public LeakyReLUImpl position(long position) { - return (LeakyReLUImpl)super.position(position); - } - @Override public LeakyReLUImpl getPointer(long i) { - return new LeakyReLUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public LeakyReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LeakyReLUImpl(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_); public LeakyReLUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java index c91e1140a44..ff10eaffcb9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java @@ -22,6 +22,9 @@ public class LeakyReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LeakyReLUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LeakyReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LeakyReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java index 92e565af3bd..ce6c320ebc4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java @@ -36,11 +36,14 @@ public class LinearImpl extends LinearImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LinearImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public LinearImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LinearImpl(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in_features, out_features); } - @SharedPtr private native void allocate(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features); public LinearImpl(@Const @ByRef LinearOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef LinearOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef LinearOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java index 7715fdf3bb7..373350b4fb5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java @@ -22,6 +22,9 @@ public class LinearImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LinearImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LinearImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LinearImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java index 403863f6de2..de7dac17d08 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java @@ -40,11 +40,14 @@ public class LocalResponseNormImpl extends LocalResponseNormImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LocalResponseNormImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public LocalResponseNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LocalResponseNormImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } - @SharedPtr private native void allocate(@Cast("int64_t") long size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long size); public LocalResponseNormImpl(@Const @ByRef LocalResponseNormOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef LocalResponseNormOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef LocalResponseNormOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java index 738e7b47f2f..40d47d59204 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java @@ -22,6 +22,9 @@ public class LocalResponseNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LocalResponseNormImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LocalResponseNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LocalResponseNormImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java index b9ac75d9bae..e674a34a8c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java @@ -28,18 +28,12 @@ public class LogSigmoidImpl extends LogSigmoidImplCloneable { static { Loader.load(); } /** Default native constructor. */ public LogSigmoidImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public LogSigmoidImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSigmoidImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public LogSigmoidImpl position(long position) { - return (LogSigmoidImpl)super.position(position); - } - @Override public LogSigmoidImpl getPointer(long i) { - return new LogSigmoidImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. 
*/ + public LogSigmoidImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java index 44e6697c45b..a5543c4b1f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java @@ -22,6 +22,9 @@ public class LogSigmoidImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSigmoidImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public LogSigmoidImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSigmoidImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java index de8a84fa2e7..e3a39fcd1c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java @@ -36,11 +36,14 @@ public class LogSoftmaxImpl extends LogSoftmaxImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSoftmaxImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public LogSoftmaxImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public LogSoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } - @SharedPtr private native void allocate(@Cast("int64_t") long dim); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); public LogSoftmaxImpl(@Const @ByRef LogSoftmaxOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef LogSoftmaxOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef LogSoftmaxOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java index 1e34f4086ea..91d8bd61953 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java @@ -22,6 +22,9 @@ public class LogSoftmaxImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSoftmaxImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public LogSoftmaxImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSoftmaxImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java index d5e5a345f5a..ac090fb39e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java @@ -37,20 +37,14 @@ public class MSELossImpl extends MSELossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MSELossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MSELossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public MSELossImpl position(long position) { - return (MSELossImpl)super.position(position); - } - @Override public MSELossImpl getPointer(long i) { - return new MSELossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public MSELossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public MSELossImpl(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_); public MSELossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java index 11f7b86113d..87b3be55ef1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java @@ -22,6 +22,9 @@ public class MSELossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MSELossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MSELossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MSELossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java index d8f4d4fac7c..6c0ee6ba5ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java @@ -40,20 +40,14 @@ public class MarginRankingLossImpl extends MarginRankingLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MarginRankingLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MarginRankingLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public MarginRankingLossImpl position(long position) { - return (MarginRankingLossImpl)super.position(position); - } - @Override public MarginRankingLossImpl getPointer(long i) { - return new MarginRankingLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public MarginRankingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public MarginRankingLossImpl(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_); public MarginRankingLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java index 4172ab881df..84b630b6ffa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java @@ -22,6 +22,9 @@ public class MarginRankingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MarginRankingLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MarginRankingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MarginRankingLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java index 396420523b1..930af1e1965 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java @@ -42,6 +42,9 @@ public class MaxPool1dImpl extends MaxPool1dImplBase { private native void allocate(@Const @ByRef MaxPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java index aa0a1a47e80..3d3558dd995 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java @@ -26,6 +26,9 @@ public class MaxPool1dImplBase extends MaxPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java index 995afcd6dd3..ca70c794908 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java @@ -22,6 +22,9 @@ public class MaxPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MaxPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java index 5cc4fbb4d54..ccf2bd302d0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java @@ -42,6 +42,9 @@ public class MaxPool2dImpl extends MaxPool2dImplBase { private native void allocate(@Const @ByRef MaxPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java index d1d7de44569..55aeea9579f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java @@ -22,6 +22,9 @@ public class MaxPool2dImplBase extends MaxPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java index 7af0d92b666..7040859d47d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java @@ -22,6 +22,9 @@ public class MaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java index b564461af18..178014621a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java @@ -42,6 +42,9 @@ public class MaxPool3dImpl extends MaxPool3dImplBase { private native void allocate(@Const @ByRef MaxPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java index 55b12f2afc3..0e09bdd31af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java @@ -22,6 +22,9 @@ public class MaxPool3dImplBase extends MaxPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java index 1334c4ff6bc..4f631a1450a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java @@ -22,6 +22,9 @@ public class MaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java index 3c1915babe4..e21a057981f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java @@ -42,6 +42,9 @@ public class MaxUnpool1dImpl extends MaxUnpool1dImplBase { private native void allocate(@Const @ByRef MaxUnpool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxUnpool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java index 84cb64e3af4..125249b8c72 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java @@ -26,6 +26,9 @@ public class MaxUnpool1dImplBase extends MaxUnpool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxUnpool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java index 6c149986337..22efb8a003a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java @@ -22,6 +22,9 @@ public class MaxUnpool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MaxUnpool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java index fa219490054..be56144dda7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java @@ -42,6 +42,9 @@ public class MaxUnpool2dImpl extends MaxUnpool2dImplBase { private native void allocate(@Const @ByRef MaxUnpool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxUnpool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java index c285af93212..acd4ca350e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java @@ -22,6 +22,9 @@ public class MaxUnpool2dImplBase extends MaxUnpool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxUnpool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java index fac762d4164..7d99f59e9f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java @@ -22,6 +22,9 @@ public class MaxUnpool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MaxUnpool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java index 3dfc5f0aa5d..c38e8401eae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java @@ -42,6 +42,9 @@ public class MaxUnpool3dImpl extends MaxUnpool3dImplBase { private native void allocate(@Const @ByRef MaxUnpool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxUnpool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java index 7df4fb8f5bc..56063f9ccb5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java @@ -22,6 +22,9 @@ public class MaxUnpool3dImplBase extends MaxUnpool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public MaxUnpool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java index 93d4eb6ff57..4e577760e6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java @@ -22,6 +22,9 @@ public class MaxUnpool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MaxUnpool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java deleted file mode 100644 index 0bdbc06c2d1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java +++ /dev/null @@ -1,54 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** MaybeOwnedTraits describes how to borrow from T. Here is how we - * can implement borrowing from an arbitrary type T using a raw - * pointer to const: */ -@Name("c10::MaybeOwnedTraitsGenericImpl >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaybeOwnedTraitsGenericImplTensor extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public MaybeOwnedTraitsGenericImplTensor() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MaybeOwnedTraitsGenericImplTensor(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaybeOwnedTraitsGenericImplTensor(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public MaybeOwnedTraitsGenericImplTensor position(long position) { - return (MaybeOwnedTraitsGenericImplTensor)super.position(position); - } - @Override public MaybeOwnedTraitsGenericImplTensor getPointer(long i) { - return new MaybeOwnedTraitsGenericImplTensor((Pointer)this).offsetAddress(i); - } - - - public static native @Const @SharedPtr("at::Tensor") Tensor createBorrow(@Const @SharedPtr("at::Tensor") @ByRef Tensor from); - - - - - - public static native @Const @SharedPtr("at::Tensor") @ByRef Tensor referenceFromBorrow(@SharedPtr("at::Tensor") @ByPtrRef Tensor borrow); - - public static native @Const @SharedPtr("at::Tensor") Tensor pointerFromBorrow(@SharedPtr("at::Tensor") @ByPtrRef Tensor borrow); - - public static native @Cast("bool") boolean debugBorrowIsValid(@SharedPtr("at::Tensor") @ByPtrRef Tensor borrow); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java index 005e5214978..c4df80bafc7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java @@ -28,18 +28,12 @@ public class MishImpl extends MishImplCloneable { static { Loader.load(); } /** Default native constructor. */ public MishImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MishImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MishImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public MishImpl position(long position) { - return (MishImpl)super.position(position); - } - @Override public MishImpl getPointer(long i) { - return new MishImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. */ + public MishImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java index 55915701e23..69c91823905 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java @@ -22,6 +22,9 @@ public class MishImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MishImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MishImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MishImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index 2da0762f136..d31e290cce4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -65,29 +65,20 @@ public class Module extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Module(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Module(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public Module position(long position) { - return (Module)super.position(position); - } - @Override public Module getPointer(long i) { - return new Module((Pointer)this).offsetAddress(i); - } public Module asModule() { return this; } /** Tells the base {@code Module} about the name of the submodule. */ public Module(@StdString BytePointer name) { super((Pointer)null); allocate(name); } - @SharedPtr private native void allocate(@StdString BytePointer name); + @SharedPtr @Name("std::make_shared") private native void allocate(@StdString BytePointer name); public Module(@StdString String name) { super((Pointer)null); allocate(name); } - @SharedPtr private native void allocate(@StdString String name); + @SharedPtr @Name("std::make_shared") private native void allocate(@StdString String name); /** Constructs the module without immediate knowledge of the submodule's name. * The name of the submodule is inferred via RTTI (if possible) the first * time {@code .name()} is invoked. */ public Module() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Returns the name of the {@code Module}. * diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java index bce177db037..733f81e3327 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java @@ -79,30 +79,24 @@ public class ModuleDictImpl extends ModuleDictImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleDictImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ModuleDictImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ModuleDictImpl position(long position) { - return (ModuleDictImpl)super.position(position); - } - @Override public ModuleDictImpl getPointer(long i) { - return new ModuleDictImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public ModuleDictImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ModuleDictImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Constructs the {@code ModuleDict} from a list of string-Module pairs. */ public ModuleDictImpl( @Const @ByRef StringSharedModuleVector modules) { super((Pointer)null); allocate(modules); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Const @ByRef StringSharedModuleVector modules); /** Constructs the {@code ModuleDict} from an {@code OrderedDict}. */ public ModuleDictImpl( @Const @ByRef StringSharedModuleDict modules) { super((Pointer)null); allocate(modules); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Const @ByRef StringSharedModuleDict modules); /** Return the items in the {@code ModuleDict}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java index 83949f92495..36cfd87e061 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java @@ -29,6 +29,9 @@ public class ModuleDictImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleDictImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ModuleDictImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleDictImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java index d41dd904624..d7dc84ed748 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java @@ -67,19 +67,13 @@ public class ModuleListImpl extends ModuleListImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleListImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ModuleListImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ModuleListImpl position(long position) { - return (ModuleListImpl)super.position(position); - } - @Override public ModuleListImpl getPointer(long i) { - return new ModuleListImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public ModuleListImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ModuleListImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Constructs the {@code ModuleList} from a variadic list of modules. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java index 8580276f85b..021bce4bc44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java @@ -22,6 +22,9 @@ public class ModuleListImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleListImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ModuleListImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleListImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java index 9790a642bff..d2a61d25018 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java @@ -40,20 +40,14 @@ public class MultiLabelMarginLossImpl extends MultiLabelMarginLossImplCloneable static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelMarginLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MultiLabelMarginLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public MultiLabelMarginLossImpl position(long position) { - return (MultiLabelMarginLossImpl)super.position(position); - } - @Override public MultiLabelMarginLossImpl getPointer(long i) { - return new MultiLabelMarginLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public MultiLabelMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiLabelMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_); public MultiLabelMarginLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java index 33ce8340d6a..8147ec4b658 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java @@ -22,6 +22,9 @@ public class MultiLabelMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelMarginLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public MultiLabelMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java index 261ec52c51f..5986c7ac789 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java @@ -40,22 +40,16 @@ public class MultiLabelSoftMarginLossImpl extends MultiLabelSoftMarginLossImplCl static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelSoftMarginLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MultiLabelSoftMarginLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public MultiLabelSoftMarginLossImpl position(long position) { - return (MultiLabelSoftMarginLossImpl)super.position(position); - } - @Override public MultiLabelSoftMarginLossImpl getPointer(long i) { - return new MultiLabelSoftMarginLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public MultiLabelSoftMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiLabelSoftMarginLossImpl( @ByVal(nullValue = "torch::nn::MultiLabelSoftMarginLossOptions{}") MultiLabelSoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @ByVal(nullValue = "torch::nn::MultiLabelSoftMarginLossOptions{}") MultiLabelSoftMarginLossOptions options_); public MultiLabelSoftMarginLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Pretty prints the {@code MultiLabelSoftMarginLoss} module into the given * {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java index ef7de6a6efe..905010b796f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java @@ -22,6 +22,9 @@ public class MultiLabelSoftMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelSoftMarginLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public MultiLabelSoftMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelSoftMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java index 9fbec19e09d..722e00cc4f3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java @@ -40,20 +40,14 @@ public class MultiMarginLossImpl extends MultiMarginLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiMarginLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MultiMarginLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public MultiMarginLossImpl position(long position) { - return (MultiMarginLossImpl)super.position(position); - } - @Override public MultiMarginLossImpl getPointer(long i) { - return new MultiMarginLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public MultiMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_); public MultiMarginLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java index 922936b9cbb..58eb580a8cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java @@ -22,6 +22,9 @@ public class MultiMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiMarginLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public MultiMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java index 42d5e88e385..195d44461bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java @@ -36,11 +36,14 @@ public class MultiheadAttentionImpl extends MultiheadAttentionImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiheadAttentionImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public MultiheadAttentionImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiheadAttentionImpl(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads) { super((Pointer)null); allocate(embed_dim, num_heads); } - @SharedPtr private native void allocate(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads); public MultiheadAttentionImpl(@Const @ByRef MultiheadAttentionOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef MultiheadAttentionOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef MultiheadAttentionOptions options_); public native @ByVal T_TensorTensor_T forward( @Const @ByRef Tensor query, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java index 6ff01f777e9..8eba1d7108c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java @@ -22,6 +22,9 @@ public class MultiheadAttentionImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiheadAttentionImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public MultiheadAttentionImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiheadAttentionImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java index ea39ed82cd6..9b3aa7cb6a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java @@ -37,20 +37,14 @@ public class NLLLossImpl extends NLLLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NLLLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NLLLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public NLLLossImpl position(long position) { - return (NLLLossImpl)super.position(position); - } - @Override public NLLLossImpl getPointer(long i) { - return new NLLLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public NLLLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public NLLLossImpl(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_); public NLLLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Pretty prints the {@code NLLLoss} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java index 7d39db10873..a2a731d0520 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java @@ -22,6 +22,9 @@ public class NLLLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NLLLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public NLLLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr NLLLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java index ca743c87f48..63a06be07b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java @@ -38,7 +38,7 @@ public class OperatorName extends Pointer { // Return the namespace of this OperatorName, if it exists. The // returned string_view is only live as long as the OperatorName // exists and name is not mutated - public native @ByVal @Cast("c10::optional*") Pointer getNamespace(); + public native @ByVal StringViewOptional getNamespace(); // Returns true if we successfully set the namespace public native @Cast("bool") boolean setNamespaceIfNotSet(@Cast("const char*") BytePointer ns); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java index 9ba9d92167f..40819aef20d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java @@ -36,20 +36,14 @@ public class PReLUImpl extends PReLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PReLUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public PReLUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public PReLUImpl position(long position) { - return (PReLUImpl)super.position(position); - } - @Override public PReLUImpl getPointer(long i) { - return new PReLUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public PReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public PReLUImpl(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_); public PReLUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java index d0cc096b017..77e90bed3d7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java @@ -22,6 +22,9 @@ public class PReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PReLUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public PReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java index 251c3d821e7..b86b9f0e5f4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java @@ -38,20 +38,14 @@ public class PairwiseDistanceImpl extends PairwiseDistanceImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PairwiseDistanceImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public PairwiseDistanceImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public PairwiseDistanceImpl position(long position) { - return (PairwiseDistanceImpl)super.position(position); - } - @Override public PairwiseDistanceImpl getPointer(long i) { - return new PairwiseDistanceImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public PairwiseDistanceImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public PairwiseDistanceImpl(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_); public PairwiseDistanceImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java index 3b6d939e5de..19391fb493a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java @@ -22,6 +22,9 @@ public class PairwiseDistanceImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PairwiseDistanceImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public PairwiseDistanceImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PairwiseDistanceImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java index e44a0bf2a89..5ea509ada3d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java @@ -23,23 +23,17 @@ public class ParameterDictImpl extends ParameterDictImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParameterDictImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ParameterDictImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ParameterDictImpl position(long position) { - return (ParameterDictImpl)super.position(position); - } - @Override public ParameterDictImpl getPointer(long i) { - return new ParameterDictImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public ParameterDictImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ParameterDictImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public ParameterDictImpl( @Const @ByRef StringTensorDict params) { super((Pointer)null); allocate(params); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @Const @ByRef StringTensorDict params); /** {@code reset()} is empty for {@code ParameterDict}, since it does not have diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java index 3527f76c67c..2bbae99ab94 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java @@ -22,6 +22,9 @@ public class ParameterDictImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParameterDictImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ParameterDictImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ParameterDictImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java index fd2f1b48a0b..ab295e340e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java @@ -22,19 +22,13 @@ public class ParameterListImpl extends ParameterListImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParameterListImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ParameterListImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ParameterListImpl position(long position) { - return (ParameterListImpl)super.position(position); - } - @Override public ParameterListImpl getPointer(long i) { - return new ParameterListImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public ParameterListImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ParameterListImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Constructs the {@code ParameterList} from a variadic list of ParameterList. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java index 137c31cde9b..20600226f14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java @@ -22,6 +22,9 @@ public class ParameterListImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParameterListImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ParameterListImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ParameterListImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java index b806a66051f..a76c156e097 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java @@ -39,9 +39,12 @@ public class PixelShuffleImpl extends PixelShuffleImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PixelShuffleImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public PixelShuffleImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public PixelShuffleImpl(@Const @ByRef PixelShuffleOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef PixelShuffleOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef PixelShuffleOptions options_); /** Pretty prints the {@code PixelShuffle} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java index 23ab59020c7..cb5d7c7de54 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java @@ -22,6 +22,9 @@ public class PixelShuffleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PixelShuffleImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public PixelShuffleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PixelShuffleImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java index 7e041525ee9..88566929b2d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java @@ -38,9 +38,12 @@ public class PixelUnshuffleImpl extends PixelUnshuffleImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PixelUnshuffleImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public PixelUnshuffleImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public PixelUnshuffleImpl(@Const @ByRef PixelUnshuffleOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef PixelUnshuffleOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef PixelUnshuffleOptions options_); /** Pretty prints the {@code PixelUnshuffle} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java index b68bb2958e8..a8f046448a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java @@ -22,6 +22,9 @@ public class PixelUnshuffleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PixelUnshuffleImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public PixelUnshuffleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PixelUnshuffleImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java index da3ae69f8bf..da673851291 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java @@ -38,20 +38,14 @@ public class PoissonNLLLossImpl extends PoissonNLLLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PoissonNLLLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public PoissonNLLLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public PoissonNLLLossImpl position(long position) { - return (PoissonNLLLossImpl)super.position(position); - } - @Override public PoissonNLLLossImpl getPointer(long i) { - return new PoissonNLLLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public PoissonNLLLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public PoissonNLLLossImpl(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_); public PoissonNLLLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java index 0f11bd11054..4e1a92f4bbd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java @@ -22,6 +22,9 @@ public class PoissonNLLLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PoissonNLLLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public PoissonNLLLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PoissonNLLLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java index 99afe6a4ac4..a9118262374 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java @@ -38,11 +38,14 @@ public class RNNCellImpl extends RNNCellImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNCellImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public RNNCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public RNNCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public RNNCellImpl(@Const @ByRef RNNCellOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef RNNCellOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef RNNCellOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java index abee4a38f00..0e2b5d9e3e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java @@ -23,6 +23,9 @@ public class RNNCellImplBase extends RNNCellImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNCellImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public RNNCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public RNNCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java index cfaa49a76d5..34356fb1263 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java @@ -22,6 +22,9 @@ public class RNNCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNCellImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public RNNCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RNNCellImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java index ea4177f0cda..3faecd94721 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java @@ -37,11 +37,14 @@ public class RNNImpl extends RNNImplBase { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public RNNImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public RNNImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public RNNImpl(@Const @ByRef RNNOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef RNNOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef RNNOptions options_); public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java index d3159654188..d506aae38db 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java @@ -23,6 +23,9 @@ public class RNNImplBase extends RNNImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public RNNImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public RNNImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java index dd945e792e3..b82f1bfd5ac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java @@ -22,6 +22,9 @@ public class RNNImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public RNNImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RNNImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java index bfdb73b3aee..3b78ab1151f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java @@ -36,20 +36,14 @@ public class RReLUImpl extends RReLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RReLUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public RReLUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public RReLUImpl position(long position) { - return (RReLUImpl)super.position(position); - } - @Override public RReLUImpl getPointer(long i) { - return new RReLUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public RReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public RReLUImpl(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_); public RReLUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java index be4449ad6ce..c32b92a41cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java @@ -22,6 +22,9 @@ public class RReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RReLUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public RReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java index 02942be581a..10ee00f5bf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java @@ -36,20 +36,14 @@ public class ReLU6Impl extends ReLU6ImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReLU6Impl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ReLU6Impl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ReLU6Impl position(long position) { - return (ReLU6Impl)super.position(position); - } - @Override public ReLU6Impl getPointer(long i) { - return new ReLU6Impl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public ReLU6Impl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ReLU6Impl(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_); public ReLU6Impl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java index c492dc875fe..9564d0860d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java @@ -22,6 +22,9 @@ public class ReLU6ImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReLU6ImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReLU6ImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReLU6ImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java index 35b4d458242..3362f4ce097 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java @@ -36,20 +36,14 @@ public class ReLUImpl extends ReLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReLUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ReLUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ReLUImpl position(long position) { - return (ReLUImpl)super.position(position); - } - @Override public ReLUImpl getPointer(long i) { - return new ReLUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public ReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ReLUImpl(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_); public ReLUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java index a043cbacc53..fb3ae8871da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java @@ -22,6 +22,9 @@ public class ReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReLUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java index 49c1a5e76a2..1605d9fb0e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java @@ -43,5 +43,8 @@ public class ReflectionPad1dImpl extends ReflectionPad1dImplBase { private native void allocate(@Const @ByRef ReflectionPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReflectionPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java index 5997c6a5210..ff7e97c563d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java @@ -24,6 +24,9 @@ public class ReflectionPad1dImplBase extends ReflectionPad1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReflectionPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReflectionPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java index 9735c421b6c..d6d730ef296 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java @@ -22,6 +22,9 @@ public class ReflectionPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReflectionPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java index 16cd9b8d0d1..8f1e1f56dec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java @@ -43,5 +43,8 @@ public class ReflectionPad2dImpl extends ReflectionPad2dImplBase { private native void allocate(@Const @ByRef ReflectionPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReflectionPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java index 46f9d030849..0430687f96a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java @@ -22,6 +22,9 @@ public class ReflectionPad2dImplBase extends ReflectionPad2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReflectionPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReflectionPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java index 16f8858b1e3..bf72b001088 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java @@ -22,6 +22,9 @@ public class ReflectionPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReflectionPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java index c35516a1d69..cf8c2f8e3f2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java @@ -44,5 +44,8 @@ public class ReflectionPad3dImpl extends ReflectionPad3dImplBase { private native void allocate(@Const @ByRef ReflectionPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReflectionPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java index 09103a09a04..74a7ee6f1a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java @@ -22,6 +22,9 @@ public class ReflectionPad3dImplBase extends ReflectionPad3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReflectionPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReflectionPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java index 47f7e78d0f8..d1153f7542c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java @@ -22,6 +22,9 @@ public class ReflectionPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReflectionPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java index 92e34930a02..58286c8f5e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java @@ -43,5 +43,8 @@ public class ReplicationPad1dImpl extends ReplicationPad1dImplBase { private native void allocate(@Const @ByRef ReplicationPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReplicationPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java index e70212d7a48..ad298ef3675 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java @@ -26,6 +26,9 @@ public class ReplicationPad1dImplBase extends ReplicationPad1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReplicationPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReplicationPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java index 0f549136e37..6cd7ed55f32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java @@ -22,6 +22,9 @@ public class ReplicationPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReplicationPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java index 10dd3c52483..475639ed826 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java @@ -43,5 +43,8 @@ public class ReplicationPad2dImpl extends ReplicationPad2dImplBase { private native void allocate(@Const @ByRef ReplicationPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad2dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReplicationPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java index 494ea96d211..77bc76b0940 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java @@ -22,6 +22,9 @@ public class ReplicationPad2dImplBase extends ReplicationPad2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReplicationPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReplicationPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java index 3a1611f0543..3d96983c9e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java @@ -22,6 +22,9 @@ public class ReplicationPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReplicationPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java index 8fd9ceb3fa4..05f08b58be4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java @@ -43,5 +43,8 @@ public class ReplicationPad3dImpl extends ReplicationPad3dImplBase { private native void allocate(@Const @ByRef ReplicationPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad3dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReplicationPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java index f2fc7171517..e093b13f2c1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java @@ -22,6 +22,9 @@ public class ReplicationPad3dImplBase extends ReplicationPad3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ReplicationPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReplicationPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java index a92e6b9ab89..b410dd009d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java @@ -22,6 +22,9 @@ public class ReplicationPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ReplicationPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java index dbccdd2a605..405364eda78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java @@ -37,18 +37,9 @@ public class Resolver extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Resolver() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Resolver(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Resolver(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public Resolver position(long position) { - return (Resolver)super.position(position); - } - @Override public Resolver getPointer(long i) { - return new Resolver((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); // Resolve a given name to a SugaredValue. This takes the method `m` that the diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java index 188a107c800..b716d2f9324 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java @@ -36,20 +36,14 @@ public class SELUImpl extends SELUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SELUImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SELUImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SELUImpl position(long position) { - return (SELUImpl)super.position(position); - } - @Override public SELUImpl getPointer(long i) { - return new SELUImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public SELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SELUImpl(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_); public SELUImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java index 7bbe3d02e86..d15b019b655 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java @@ -22,6 +22,9 @@ public class SELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SELUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java index 497840ba78b..2d9afab7bc4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java @@ -41,9 +41,11 @@ public class SchemaInfo extends Pointer { public native @Cast("bool") boolean is_mutable(@Const @ByRef SchemaArgument argument); - public native @Cast("bool") boolean is_mutable(@ByVal @Cast("c10::string_view*") Pointer name); + public native @Cast("bool") boolean is_mutable(@StringView BytePointer name); + public native @Cast("bool") boolean is_mutable(@StringView String name); - public native @Cast("bool") boolean has_argument(@ByVal @Cast("c10::string_view*") Pointer name); + public native @Cast("bool") boolean has_argument(@StringView BytePointer name); + public native @Cast("bool") boolean has_argument(@StringView String name); public native @Cast("bool") boolean is_nondeterministic(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java index 0507f0aca0b..09fcaef8459 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java @@ -91,26 +91,20 @@ public class SequentialImpl extends SequentialImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SequentialImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public SequentialImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SequentialImpl position(long position) { - return (SequentialImpl)super.position(position); - } - @Override public SequentialImpl getPointer(long i) { - return new SequentialImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public SequentialImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SequentialImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Constructs the {@code Sequential} from a variadic list of modules. */ /** Constructs the {@code Sequential} from an {@code OrderedDict} of named {@code AnyModule}s. */ public SequentialImpl( @ByRef(true) StringAnyModuleDict ordered_dict) { super((Pointer)null); allocate(ordered_dict); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @ByRef(true) StringAnyModuleDict ordered_dict); /** Constructs the {@code Sequential} from a braced-init-list of named {@code AnyModule}s. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java index 4565f511da8..914cb294b88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java @@ -22,6 +22,9 @@ public class SequentialImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SequentialImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SequentialImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SequentialImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java index 1e2941da8e0..567f4132232 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java @@ -28,18 +28,12 @@ public class SiLUImpl extends SiLUImplCloneable { static { Loader.load(); } /** Default native constructor. */ public SiLUImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SiLUImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SiLUImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public SiLUImpl position(long position) { - return (SiLUImpl)super.position(position); - } - @Override public SiLUImpl getPointer(long i) { - return new SiLUImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. 
*/ + public SiLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java index 8bc485fb410..bb3899035d7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java @@ -22,6 +22,9 @@ public class SiLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SiLUImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SiLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SiLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java index 09256d58399..5b3cd982495 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java @@ -28,18 +28,12 @@ public class SigmoidImpl extends SigmoidImplCloneable { static { Loader.load(); } /** Default native constructor. */ public SigmoidImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SigmoidImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SigmoidImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public SigmoidImpl position(long position) { - return (SigmoidImpl)super.position(position); - } - @Override public SigmoidImpl getPointer(long i) { - return new SigmoidImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. */ + public SigmoidImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java index c12b7db95ab..0b4ea8c6497 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java @@ -22,6 +22,9 @@ public class SigmoidImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SigmoidImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public SigmoidImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SigmoidImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java index 06269e5e93f..18a4352e998 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java @@ -40,20 +40,14 @@ public class SmoothL1LossImpl extends SmoothL1LossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SmoothL1LossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SmoothL1LossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SmoothL1LossImpl position(long position) { - return (SmoothL1LossImpl)super.position(position); - } - @Override public SmoothL1LossImpl getPointer(long i) { - return new SmoothL1LossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public SmoothL1LossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SmoothL1LossImpl(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options) { super((Pointer)null); allocate(options); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options); public SmoothL1LossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java index a04402f7500..3df8951aca8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java @@ -22,6 +22,9 @@ public class SmoothL1LossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SmoothL1LossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public SmoothL1LossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SmoothL1LossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java index b98d3d226f9..838a078851d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java @@ -39,20 +39,14 @@ public class SoftMarginLossImpl extends SoftMarginLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftMarginLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SoftMarginLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SoftMarginLossImpl position(long position) { - return (SoftMarginLossImpl)super.position(position); - } - @Override public SoftMarginLossImpl getPointer(long i) { - return new SoftMarginLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. */ + public SoftMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftMarginLossImpl(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_); public SoftMarginLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); /** Pretty prints the {@code SoftMarginLoss} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java index 4a0bfd4405b..ee3b166841e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java @@ -22,6 +22,9 @@ public class SoftMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftMarginLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public SoftMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java index 51cfa208d0c..bddd9c06933 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java @@ -28,18 +28,12 @@ public class Softmax2dImpl extends Softmax2dImplCloneable { static { Loader.load(); } /** Default native constructor. */ public Softmax2dImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Softmax2dImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Softmax2dImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public Softmax2dImpl position(long position) { - return (Softmax2dImpl)super.position(position); - } - @Override public Softmax2dImpl getPointer(long i) { - return new Softmax2dImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. */ + public Softmax2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java index e852f490664..9bc6a768282 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java @@ -22,6 +22,9 @@ public class Softmax2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Softmax2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public Softmax2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Softmax2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java index b246f27e0cc..ea418aea4b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java @@ -36,11 +36,14 @@ public class SoftmaxImpl extends SoftmaxImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public SoftmaxImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public SoftmaxImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } - @SharedPtr private native void allocate(@Cast("int64_t") long dim); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); public SoftmaxImpl(@Const @ByRef SoftmaxOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef SoftmaxOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef SoftmaxOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java index aed37772063..bd0c6e306cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java @@ -22,6 +22,9 @@ public class SoftmaxImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftmaxImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SoftmaxImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftmaxImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java index a0ac0eb6572..c14e886f010 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java @@ -36,11 +36,14 @@ public class SoftminImpl extends SoftminImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftminImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public SoftminImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftminImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } - @SharedPtr private native void allocate(@Cast("int64_t") long dim); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); public SoftminImpl(@Const @ByRef SoftminOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef SoftminOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef SoftminOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java index 5f69d534f7f..b76fedb74bd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java @@ -22,6 +22,9 @@ public class SoftminImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftminImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SoftminImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftminImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java index fec9a6699fe..66d21011138 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java @@ -36,20 +36,14 @@ public class SoftplusImpl extends SoftplusImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftplusImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SoftplusImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SoftplusImpl position(long position) { - return (SoftplusImpl)super.position(position); - } - @Override public SoftplusImpl getPointer(long i) { - return new SoftplusImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public SoftplusImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftplusImpl(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_); public SoftplusImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java index 5d37c1744f1..c04be5f999e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java @@ -22,6 +22,9 @@ public class SoftplusImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftplusImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SoftplusImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftplusImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java index abd2d0a66b2..dc03c83f5c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java @@ -36,20 +36,14 @@ public class SoftshrinkImpl extends SoftshrinkImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftshrinkImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SoftshrinkImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SoftshrinkImpl position(long position) { - return (SoftshrinkImpl)super.position(position); - } - @Override public SoftshrinkImpl getPointer(long i) { - return new SoftshrinkImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public SoftshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftshrinkImpl(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_); public SoftshrinkImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java index 7fc7bd6204d..becb137d369 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java @@ -22,6 +22,9 @@ public class SoftshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftshrinkImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SoftshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftshrinkImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java index e268e01f2f9..3b49d20277a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java @@ -28,18 +28,12 @@ public class SoftsignImpl extends SoftsignImplCloneable { static { Loader.load(); } /** Default native constructor. */ public SoftsignImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SoftsignImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftsignImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public SoftsignImpl position(long position) { - return (SoftsignImpl)super.position(position); - } - @Override public SoftsignImpl getPointer(long i) { - return new SoftsignImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. 
*/ + public SoftsignImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java index 8b13f6c2501..e142596426d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java @@ -22,6 +22,9 @@ public class SoftsignImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftsignImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public SoftsignImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftsignImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java index 70776b2cf98..65acebad17e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java @@ -27,6 +27,8 @@ @Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Source extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Source(Pointer p) { super(p); } // Whether or not Source should copy the string passed in the constructor. 
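The module hunks above all follow one pattern: the generated array-allocator constructors are dropped, each concrete and Cloneable module class gains a downcast constructor backed by SHARED_PTR_NAMESPACE::dynamic_pointer_cast, and the plain allocate() overloads are tagged with std::make_shared so new instances are created directly as shared_ptr. A minimal usage sketch, assuming only the constructors and the asModule() helper shown in these hunks; the class name, variable names, and printed output are illustrative, not part of the generated sources:

import org.bytedeco.pytorch.Module;
import org.bytedeco.pytorch.ReLUImpl;
import org.bytedeco.pytorch.SigmoidImpl;

public class DowncastSketch {
    public static void main(String[] args) {
        // Create a concrete module; its allocate() now routes through std::make_shared.
        ReLUImpl relu = new ReLUImpl();

        // Upcast to the generic Module handle via the asModule() helper added above.
        Module generic = relu.asModule();

        // The new downcast constructor performs a dynamic_pointer_cast back to ReLUImpl.
        ReLUImpl reluAgain = new ReLUImpl(generic);
        System.out.println("cast to ReLUImpl is null? " + reluAgain.isNull());   // expected: false

        // Casting to an unrelated module type is expected to yield a null pointer,
        // since the underlying dynamic_pointer_cast fails at runtime.
        SigmoidImpl wrongCast = new SigmoidImpl(generic);
        System.out.println("cast to SigmoidImpl is null? " + wrongCast.isNull()); // expected: true
    }
}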
public enum CopiesString { COPIES_STRING(0), DONT_COPY(1); @@ -39,33 +41,37 @@ public enum CopiesString { COPIES_STRING(0), DONT_COPY(1); } public Source( - @ByVal @Cast("c10::string_view*") Pointer text_view, + @StringView BytePointer text_view, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, CopiesString copies_str/*=torch::jit::Source::COPIES_STRING*/) { super((Pointer)null); allocate(text_view, filename, starting_line_no, gen_ranges, copies_str); } private native void allocate( - @ByVal @Cast("c10::string_view*") Pointer text_view, + @StringView BytePointer text_view, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, CopiesString copies_str/*=torch::jit::Source::COPIES_STRING*/); public Source( - @ByVal @Cast("c10::string_view*") Pointer text_view) { super((Pointer)null); allocate(text_view); } + @StringView BytePointer text_view) { super((Pointer)null); allocate(text_view); } private native void allocate( - @ByVal @Cast("c10::string_view*") Pointer text_view); + @StringView BytePointer text_view); public Source( - @ByVal @Cast("c10::string_view*") Pointer text_view, + @StringView String text_view, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, @Cast("torch::jit::Source::CopiesString") int copies_str/*=torch::jit::Source::COPIES_STRING*/) { super((Pointer)null); allocate(text_view, filename, starting_line_no, gen_ranges, copies_str); } private native void allocate( - @ByVal @Cast("c10::string_view*") Pointer text_view, + @StringView String text_view, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, @Cast("torch::jit::Source::CopiesString") int copies_str/*=torch::jit::Source::COPIES_STRING*/); + public Source( + @StringView String text_view) { super((Pointer)null); allocate(text_view); } + private native void allocate( + @StringView String text_view); public Source( @ByVal StringCordView str, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java index 410544979ff..24045c32e93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java @@ -50,7 +50,7 @@ private native void allocate( @ByVal StringCordView.Iterator start_iter, @Cast("size_t") long end_); - public native @ByVal @Cast("const c10::string_view*") Pointer token_text(); + public native @StringView BytePointer token_text(); public native @Const @ByVal StringCordView text(); public native @Cast("size_t") long size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java index 1f16079ecbf..656c2a06338 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java @@ -41,10 +41,10 @@ public class StringCordView extends Pointer { public StringCordView(@Const @ByRef StringCordView arg0) { super((Pointer)null); allocate(arg0); } private native void allocate(@Const @ByRef StringCordView arg0); public StringCordView( - 
@Cast("c10::string_view*") @StdVector Pointer inputs, + @ByVal StringViewVector inputs, @ByVal @Cast("std::vector >*") Pointer ownerships) { super((Pointer)null); allocate(inputs, ownerships); } private native void allocate( - @Cast("c10::string_view*") @StdVector Pointer inputs, + @ByVal StringViewVector inputs, @ByVal @Cast("std::vector >*") Pointer ownerships); public native @ByRef @Name("operator =") StringCordView put(@Const @ByRef StringCordView arg0); @@ -65,7 +65,7 @@ private native void allocate( public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef StringCordView rhs); - public native @ByVal @Cast("c10::string_view*") Pointer piece(@Cast("size_t") long index); + public native @StringView BytePointer piece(@Cast("size_t") long index); @NoOffset public static class Iterator extends Pointer { static { Loader.load(); } @@ -116,7 +116,7 @@ private native void allocate( public native @Cast("char") @Name("operator *") byte multiply(); // returns rest of the line of the current iterator - public native @ByVal @Cast("c10::string_view*") Pointer rest_line(); + public native @StringView BytePointer rest_line(); public native @Cast("size_t") long pos(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java deleted file mode 100644 index f309d2040ae..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java +++ /dev/null @@ -1,53 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace std - -@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringView extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringView(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public StringView(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public StringView position(long position) { - return (StringView)super.position(position); - } - @Override public StringView getPointer(long i) { - return new StringView((Pointer)this).offsetAddress(i); - } - - public StringView() { super((Pointer)null); allocate(); } - private native void allocate(); - public StringView(@Cast("const char*") BytePointer str_ptr) { super((Pointer)null); allocate(str_ptr); } - private native void allocate(@Cast("const char*") BytePointer str_ptr); - public StringView(String str_ptr) { super((Pointer)null); allocate(str_ptr); } - private native void allocate(String str_ptr); - - public native @Cast("const char*") BytePointer str(); - - private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef StringView dt); - public Pointer shiftLeft(Pointer os) { return shiftLeft(os, this); } - - private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef StringView lhs, @Const @ByRef StringView rhs); - public boolean equals(StringView rhs) { return equals(this, rhs); } - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef StringView lhs, @Const @ByRef StringView rhs); - public boolean notEquals(StringView rhs) { return notEquals(this, rhs); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java new file mode 100644 index 00000000000..2930bab568f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringViewOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public StringViewOptional(Pointer p) { super(p); } + public StringViewOptional(BytePointer value) { this(); put(value); } + public StringViewOptional(String value) { this(); put(value); } + public StringViewOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef StringViewOptional put(@ByRef StringViewOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @StringView BytePointer get(); + @ValueSetter public native StringViewOptional put(@StringView BytePointer value); + @ValueSetter public native StringViewOptional put(@StringView String value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java similarity index 50% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java index 63db0877207..17cb19d1614 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java @@ -17,30 +17,33 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SharedAnyModuleVector extends Pointer { +@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringViewVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SharedAnyModuleVector(Pointer p) { super(p); } - public SharedAnyModuleVector(AnyModule value) { this(1); put(0, value); } - public SharedAnyModuleVector(AnyModule ... array) { this(array.length); put(array); } - public SharedAnyModuleVector() { allocate(); } - public SharedAnyModuleVector(long n) { allocate(n); } + public StringViewVector(Pointer p) { super(p); } + public StringViewVector(BytePointer value) { this(1); put(0, value); } + public StringViewVector(BytePointer ... array) { this(array.length); put(array); } + public StringViewVector(String value) { this(1); put(0, value); } + public StringViewVector(String ... 
array) { this(array.length); put(array); } + public StringViewVector() { allocate(); } + public StringViewVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef SharedAnyModuleVector put(@ByRef SharedAnyModuleVector x); + public native @Name("operator =") @ByRef StringViewVector put(@ByRef StringViewVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - public AnyModule front() { return get(0); } - public AnyModule back() { return get(size() - 1); } - @Index(function = "at") public native @SharedPtr("torch::nn::AnyModule") AnyModule get(@Cast("size_t") long i); - public native SharedAnyModuleVector put(@Cast("size_t") long i, AnyModule value); + public BytePointer front() { return get(0); } + public BytePointer back() { return get(size() - 1); } + @Index(function = "at") public native @StringView BytePointer get(@Cast("size_t") long i); + public native StringViewVector put(@Cast("size_t") long i, BytePointer value); + @ValueSetter @Index(function = "at") public native StringViewVector put(@Cast("size_t") long i, @StringView String value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::nn::AnyModule") AnyModule value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @StringView BytePointer value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -50,11 +53,11 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr("torch::nn::AnyModule") @Const AnyModule get(); + public native @Name("operator *") @StringView BytePointer get(); } - public AnyModule[] get() { - AnyModule[] array = new AnyModule[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; + public BytePointer[] get() { + BytePointer[] array = new BytePointer[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; for (int i = 0; i < array.length; i++) { array[i] = get(i); } @@ -64,22 +67,39 @@ public AnyModule[] get() { return java.util.Arrays.toString(get()); } - public AnyModule pop_back() { + public BytePointer pop_back() { long size = size(); - AnyModule value = get(size - 1); + BytePointer value = get(size - 1); resize(size - 1); return value; } - public SharedAnyModuleVector push_back(AnyModule value) { + public StringViewVector push_back(BytePointer value) { long size = size(); resize(size + 1); return put(size, value); } - public SharedAnyModuleVector put(AnyModule value) { + public StringViewVector put(BytePointer value) { if (size() != 1) { resize(1); } return put(0, value); } - public SharedAnyModuleVector put(AnyModule ... array) { + public StringViewVector put(BytePointer ... array) { + if (size() != array.length) { resize(array.length); } + for (int i = 0; i < array.length; i++) { + put(i, array[i]); + } + return this; + } + + public StringViewVector push_back(String value) { + long size = size(); + resize(size + 1); + return put(size, value); + } + public StringViewVector put(String value) { + if (size() != 1) { resize(1); } + return put(0, value); + } + public StringViewVector put(String ... 
array) { if (size() != array.length) { resize(array.length); } for (int i = 0; i < array.length; i++) { put(i, array[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java new file mode 100644 index 00000000000..f2271a219c5 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringViewVectorOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public StringViewVectorOptional(Pointer p) { super(p); } + public StringViewVectorOptional(StringViewVector value) { this(); put(value); } + public StringViewVectorOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef StringViewVectorOptional put(@ByRef StringViewVectorOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef StringViewVector get(); + @ValueSetter public native StringViewVectorOptional put(@ByRef StringViewVector value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java index 00690e3c568..2072257a425 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java @@ -28,18 +28,12 @@ public class TanhImpl extends TanhImplCloneable { static { Loader.load(); } /** Default native constructor. */ public TanhImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TanhImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public TanhImpl position(long position) { - return (TanhImpl)super.position(position); - } - @Override public TanhImpl getPointer(long i) { - return new TanhImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared") private native void allocate(); + /** Downcast constructor. 
*/ + public TanhImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::TanhImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java index b626e48b46b..8855ed5fbe2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java @@ -22,6 +22,9 @@ public class TanhImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TanhImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::TanhImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::TanhImpl>>") Module asModule(@SharedPtr TanhImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java index d9b3ecb991a..022e8cbab2b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java @@ -28,18 +28,12 @@ public class TanhshrinkImpl extends TanhshrinkImplCloneable { static { Loader.load(); } /** Default native constructor. */ public TanhshrinkImpl() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TanhshrinkImpl(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhshrinkImpl(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public TanhshrinkImpl position(long position) { - return (TanhshrinkImpl)super.position(position); - } - @Override public TanhshrinkImpl getPointer(long i) { - return new TanhshrinkImpl((Pointer)this).offsetAddress(i); - } + @SharedPtr @Name("std::make_shared<torch::nn::TanhshrinkImpl>") private native void allocate(); + /** Downcast constructor. */ + public TanhshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::TanhshrinkImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java index b2e5fa0e9b7..e23d952c622 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java @@ -22,6 +22,9 @@ public class TanhshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhshrinkImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public TanhshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhshrinkImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 0ebf5a50d82..c392efaf2dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -51,19 +51,19 @@ public class Tensor extends TensorBase { } public Tensor() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + private native void allocate(); // This constructor should not be used by end users and is an implementation // detail invoked by autogenerated code. public Tensor( @ByVal TensorImplPtr tensor_impl) { super((Pointer)null); allocate(tensor_impl); } - @SharedPtr private native void allocate( + private native void allocate( @ByVal TensorImplPtr tensor_impl); public Tensor(@Const @ByRef Tensor tensor) { super((Pointer)null); allocate(tensor); } - @SharedPtr private native void allocate(@Const @ByRef Tensor tensor); + private native void allocate(@Const @ByRef Tensor tensor); // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount public Tensor(@Const @ByRef TensorBase base) { super((Pointer)null); allocate(base); } - @SharedPtr private native void allocate(@Const @ByRef TensorBase base); + private native void allocate(@Const @ByRef TensorBase base); /*implicit*/ // Creates a new wrapper from TensorImpl. 
Intentionally a free method because @@ -572,20 +572,20 @@ public Tensor( public native @ByVal Tensor diff(); public native @ByVal Tensor div(@Const @ByRef Tensor other); public native @ByRef Tensor div_(@Const @ByRef Tensor other); - public native @ByVal Tensor div(@Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); - public native @ByRef Tensor div_(@Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); + public native @ByVal Tensor div(@Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); + public native @ByRef Tensor div_(@Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); public native @ByVal Tensor div(@Const @ByRef Scalar other); public native @ByRef Tensor div_(@Const @ByRef Scalar other); - public native @ByVal Tensor div(@Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); - public native @ByRef Tensor div_(@Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); + public native @ByVal Tensor div(@Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); + public native @ByRef Tensor div_(@Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); public native @ByVal Tensor divide(@Const @ByRef Tensor other); public native @ByRef Tensor divide_(@Const @ByRef Tensor other); public native @ByVal Tensor divide(@Const @ByRef Scalar other); public native @ByRef Tensor divide_(@Const @ByRef Scalar other); - public native @ByVal Tensor divide(@Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); - public native @ByRef Tensor divide_(@Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); - public native @ByVal Tensor divide(@Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); - public native @ByRef Tensor divide_(@Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); + public native @ByVal Tensor divide(@Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); + public native @ByRef Tensor divide_(@Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); + public native @ByVal Tensor divide(@Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); + public native @ByRef Tensor divide_(@Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); public native @ByVal Tensor true_divide(@Const @ByRef Tensor other); public native @ByRef Tensor true_divide_(@Const @ByRef Tensor other); public native @ByVal Tensor true_divide(@Const @ByRef Scalar other); @@ -952,7 +952,8 @@ public Tensor( public native @ByVal Tensor sspaddmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); public native @ByVal Tensor sspaddmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); public native @ByVal Tensor stft(@Cast("int64_t") long n_fft, @ByVal LongOptional hop_length, @ByVal LongOptional win_length, @Const @ByRef TensorOptional window, @Cast("bool") boolean normalized, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); - public native @ByVal Tensor stft(@Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = 
"c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @ByVal(nullValue = "c10::string_view(\"reflect\")") @Cast("c10::string_view*") Pointer pad_mode, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); + public native @ByVal Tensor stft(@Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @StringView BytePointer pad_mode/*="reflect"*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); + public native @ByVal Tensor stft(@Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @StringView String pad_mode/*="reflect"*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); public native @ByVal Tensor istft(@Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional length, @Cast("bool") boolean return_complex/*=false*/); public native @ByVal Tensor istft(@Cast("int64_t") long n_fft); public native @Cast("int64_t") long stride(@ByVal Dimname dim); @@ -1184,10 +1185,14 @@ public Tensor( public native @ByVal Tensor index_add(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); public native @ByVal Tensor index_add(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); public native @ByVal Tensor index_add(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); - public native @ByRef Tensor index_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); - public native @ByRef Tensor index_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); - public native @ByVal Tensor index_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); - public native @ByVal Tensor index_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); + public native @ByRef Tensor index_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef 
Tensor source, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByRef Tensor index_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce); + public native @ByRef Tensor index_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByRef Tensor index_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce); + public native @ByVal Tensor index_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByVal Tensor index_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce); + public native @ByVal Tensor index_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByVal Tensor index_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce); public native @ByRef Tensor index_fill_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); public native @ByVal Tensor index_fill(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); public native @ByRef Tensor index_fill_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); @@ -1200,19 +1205,27 @@ public Tensor( public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); public native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); - public native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); - public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); - public native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); - public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); + public native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); + public native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); + public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); + public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); + public native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView BytePointer reduce); + public 
native @ByVal Tensor scatter(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView String reduce); + public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView BytePointer reduce); + public native @ByRef Tensor scatter_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView String reduce); public native @ByVal Tensor scatter(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); public native @ByVal Tensor scatter(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); public native @ByVal Tensor scatter_add(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); public native @ByRef Tensor scatter_add_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); public native @ByVal Tensor scatter_add(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); - public native @ByVal Tensor scatter_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); - public native @ByVal Tensor scatter_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); - public native @ByRef Tensor scatter_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); - public native @ByRef Tensor scatter_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); + public native @ByVal Tensor scatter_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByVal Tensor scatter_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); + public native @ByVal Tensor scatter_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByVal Tensor scatter_reduce(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); + public native @ByRef Tensor scatter_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByRef Tensor scatter_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); + public native @ByRef Tensor scatter_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); + public native @ByRef Tensor scatter_reduce_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); public native @ByRef Tensor eq_(@Const @ByRef Scalar other); public native @ByRef Tensor eq_(@Const @ByRef Tensor other); public native @ByVal Tensor bitwise_and(@Const @ByRef Scalar other); @@ -1427,14 +1440,18 @@ public Tensor( public native @ByVal Tensor max(@Const @ByRef Tensor 
other); public native @ByVal Tensor minimum(@Const @ByRef Tensor other); public native @ByVal Tensor min(@Const @ByRef Tensor other); - public native @ByVal Tensor quantile(@Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); + public native @ByVal Tensor quantile(@Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); public native @ByVal Tensor quantile(@Const @ByRef Tensor q); - public native @ByVal Tensor quantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); + public native @ByVal Tensor quantile(@Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); + public native @ByVal Tensor quantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); public native @ByVal Tensor quantile(double q); - public native @ByVal Tensor nanquantile(@Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); + public native @ByVal Tensor quantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); + public native @ByVal Tensor nanquantile(@Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); public native @ByVal Tensor nanquantile(@Const @ByRef Tensor q); - public native @ByVal Tensor nanquantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); + public native @ByVal Tensor nanquantile(@Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); + public native @ByVal Tensor nanquantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); public native @ByVal Tensor nanquantile(double q); + public native @ByVal Tensor nanquantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); public native @ByVal T_TensorTensor_T sort(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); public native @ByVal T_TensorTensor_T sort(); public native @ByVal T_TensorTensor_T sort(@ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java index 9cb48dfd47b..f117dcb39f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java @@ -36,11 +36,14 @@ public class ThresholdImpl extends ThresholdImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ThresholdImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ThresholdImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ThresholdImpl(double threshold, double value) { super((Pointer)null); allocate(threshold, value); } - @SharedPtr private native void allocate(double threshold, double value); + @SharedPtr @Name("std::make_shared") private native void allocate(double threshold, double value); public ThresholdImpl(@Const @ByRef ThresholdOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef ThresholdOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef ThresholdOptions options_); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java index 3a77b49fe07..2207226fc07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java @@ -22,6 +22,9 @@ public class ThresholdImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ThresholdImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ThresholdImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ThresholdImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java index 76cdd2dda20..a9b5ea47125 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java @@ -43,9 +43,12 @@ public class TransformerDecoderImpl extends TransformerDecoderImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public TransformerDecoderImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerDecoderImpl(@ByVal TransformerDecoderOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal TransformerDecoderOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerDecoderOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java index 2479f98bd87..e70336775b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java @@ -22,6 +22,9 @@ public class TransformerDecoderImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TransformerDecoderImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java index 7b663b2cd3b..37c21308239 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java @@ -44,11 +44,14 @@ public class TransformerDecoderLayerImpl extends TransformerDecoderLayerImplClon static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderLayerImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public TransformerDecoderLayerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerDecoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } - @SharedPtr private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); public TransformerDecoderLayerImpl(@ByVal TransformerDecoderLayerOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal TransformerDecoderLayerOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerDecoderLayerOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java index b517ae6423a..c5a2c1e439a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java @@ -22,6 +22,9 @@ public class TransformerDecoderLayerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderLayerImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TransformerDecoderLayerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderLayerImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java index 6e014e47e4f..ec3c0fff6b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java @@ -41,9 +41,12 @@ public class TransformerEncoderImpl extends TransformerEncoderImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public TransformerEncoderImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerEncoderImpl(@ByVal TransformerEncoderOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal TransformerEncoderOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerEncoderOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor src, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java index 6acc934eab8..ab0704b9752 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java @@ -22,6 +22,9 @@ public class TransformerEncoderImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TransformerEncoderImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java index a45806b1195..48640e4a79e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java @@ -39,11 +39,14 @@ public class TransformerEncoderLayerImpl extends TransformerEncoderLayerImplClon static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderLayerImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public TransformerEncoderLayerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerEncoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } - @SharedPtr private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); public TransformerEncoderLayerImpl(@ByVal TransformerEncoderLayerOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal TransformerEncoderLayerOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerEncoderLayerOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor src, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java index 6f777ba5e2a..d4026f2fdc8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java @@ -22,6 +22,9 @@ public class TransformerEncoderLayerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderLayerImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TransformerEncoderLayerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderLayerImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java index 1b8fb417fbe..a32a1c58a3a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java @@ -41,6 +41,9 @@ public class TransformerImpl extends TransformerImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public TransformerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); /// @@ -50,7 +53,7 @@ public class TransformerImpl extends TransformerImplCloneable { /// /// public TransformerImpl(@ByVal TransformerOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal TransformerOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerOptions options_); /** forward function for Transformer Module * Args: diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java index a84e89bfaf2..fb7d0cf682e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java @@ -22,6 +22,9 @@ public class TransformerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TransformerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java index 7e54999d5b9..bba134d30a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java @@ -43,20 +43,14 @@ public class TripletMarginLossImpl extends TripletMarginLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TripletMarginLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public TripletMarginLossImpl position(long position) { - return (TripletMarginLossImpl)super.position(position); - } - @Override public TripletMarginLossImpl getPointer(long i) { - return new TripletMarginLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public TripletMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public TripletMarginLossImpl(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_); public TripletMarginLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java index 349062bee6a..7a641dd919b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java @@ -22,6 +22,9 @@ public class TripletMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TripletMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TripletMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java index 2c8b548cb13..9c2f6768d00 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java @@ -45,22 +45,16 @@ public class TripletMarginWithDistanceLossImpl extends TripletMarginWithDistance static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginWithDistanceLossImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TripletMarginWithDistanceLossImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public TripletMarginWithDistanceLossImpl position(long position) { - return (TripletMarginWithDistanceLossImpl)super.position(position); - } - @Override public TripletMarginWithDistanceLossImpl getPointer(long i) { - return new TripletMarginWithDistanceLossImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public TripletMarginWithDistanceLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public TripletMarginWithDistanceLossImpl( @ByVal(nullValue = "torch::nn::TripletMarginWithDistanceLossOptions{}") TripletMarginWithDistanceLossOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate( + @SharedPtr @Name("std::make_shared") private native void allocate( @ByVal(nullValue = "torch::nn::TripletMarginWithDistanceLossOptions{}") TripletMarginWithDistanceLossOptions options_); public TripletMarginWithDistanceLossImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java index d561758eaad..0cd026739b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java @@ -22,6 +22,9 @@ public class TripletMarginWithDistanceLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginWithDistanceLossImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public TripletMarginWithDistanceLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TripletMarginWithDistanceLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java index d831ce68bed..4fde17c4aaa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java @@ -35,7 +35,7 @@ public class TupleType extends NamedType { @Const @ByRef TypeVector field_types); public static native @SharedPtr TupleType createNamed(@Const @ByRef QualifiedNameOptional name, - @Cast("c10::string_view*") @StdVector Pointer field_names, + @Const @ByRef StringViewVector field_names, @Const @ByRef TypeVector field_types); public static native @SharedPtr TupleType create( @@ -53,7 +53,7 @@ public class TupleType extends NamedType { public native @ByVal Type.TypePtr createWithContained( @ByVal TypeVector contained_types); public native @SharedPtr FunctionSchema schema(); - public native @ByVal @Cast("c10::optional >*") Pointer names(); + public native @ByVal StringViewVectorOptional names(); @MemberGetter public static native TypeKind Kind(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java index 520b4a4bd34..6b35f9ace60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java @@ -94,7 +94,7 @@ public class TypeMeta extends Pointer { /** * Returns 
a printable name for the type. */ - public native @ByVal @Cast("c10::string_view*") @NoException(true) Pointer name(); + public native @StringView @NoException(true) BytePointer name(); private static native @Namespace @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); public boolean equals(TypeMeta rhs) { return equals(this, rhs); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java index 6fd512a0277..905f7993d70 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java @@ -88,7 +88,7 @@ public TypeMetaData( PlacementDelete placementDelete, Delete deleteFn, @ByVal TypeIdentifier id, - @ByVal @Cast("c10::string_view*") Pointer name) { super((Pointer)null); allocate(itemsize, newFn, placementNew, copy, placementDelete, deleteFn, id, name); } + @StringView BytePointer name) { super((Pointer)null); allocate(itemsize, newFn, placementNew, copy, placementDelete, deleteFn, id, name); } @NoException(true) private native void allocate( @Cast("size_t") long itemsize, New newFn, @@ -97,7 +97,25 @@ public TypeMetaData( PlacementDelete placementDelete, Delete deleteFn, @ByVal TypeIdentifier id, - @ByVal @Cast("c10::string_view*") Pointer name); + @StringView BytePointer name); + public TypeMetaData( + @Cast("size_t") long itemsize, + New newFn, + PlacementNew placementNew, + Copy copy, + PlacementDelete placementDelete, + Delete deleteFn, + @ByVal TypeIdentifier id, + @StringView String name) { super((Pointer)null); allocate(itemsize, newFn, placementNew, copy, placementDelete, deleteFn, id, name); } + @NoException(true) private native void allocate( + @Cast("size_t") long itemsize, + New newFn, + PlacementNew placementNew, + Copy copy, + PlacementDelete placementDelete, + Delete deleteFn, + @ByVal TypeIdentifier id, + @StringView String name); public native @Cast("size_t") long itemsize_(); public native TypeMetaData itemsize_(long setter); public native New new_(); public native TypeMetaData new_(New setter); @@ -106,5 +124,5 @@ public TypeMetaData( public native PlacementDelete placementDelete_(); public native TypeMetaData placementDelete_(PlacementDelete setter); public native Delete delete_(); public native TypeMetaData delete_(Delete setter); public native @ByRef TypeIdentifier id_(); public native TypeMetaData id_(TypeIdentifier setter); - public native @ByRef @Cast("c10::string_view*") Pointer name_(); public native TypeMetaData name_(Pointer setter); + public native @StringView BytePointer name_(); public native TypeMetaData name_(BytePointer setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java index faa29468c18..a616f075b69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java @@ -38,15 +38,18 @@ public class UnflattenImpl extends UnflattenImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnflattenImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public UnflattenImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public UnflattenImpl(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes) { super((Pointer)null); allocate(dim, sizes); } - @SharedPtr private native void allocate(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes); public UnflattenImpl(@StdString BytePointer dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape) { super((Pointer)null); allocate(dimname, namedshape); } - @SharedPtr private native void allocate(@StdString BytePointer dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); + @SharedPtr @Name("std::make_shared") private native void allocate(@StdString BytePointer dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); public UnflattenImpl(@StdString String dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape) { super((Pointer)null); allocate(dimname, namedshape); } - @SharedPtr private native void allocate(@StdString String dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); + @SharedPtr @Name("std::make_shared") private native void allocate(@StdString String dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); public UnflattenImpl(@ByVal UnflattenOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@ByVal UnflattenOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal UnflattenOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java index a3f7407af7b..df70c72520d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java @@ -22,6 +22,9 @@ public class UnflattenImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnflattenImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public UnflattenImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UnflattenImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java index a12e85dc614..01caa847387 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java @@ -36,11 +36,14 @@ public class UnfoldImpl extends UnfoldImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public UnfoldImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public UnfoldImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public UnfoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public UnfoldImpl(@Const @ByRef UnfoldOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef UnfoldOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef UnfoldOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java index 05662e463ca..dd110bc5dda 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java @@ -22,6 +22,9 @@ public class UnfoldImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnfoldImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public UnfoldImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UnfoldImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java index 69a6ad15fcf..59040ece2d2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java @@ -38,20 +38,14 @@ public class UpsampleImpl extends UpsampleImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UpsampleImpl(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public UpsampleImpl(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public UpsampleImpl position(long position) { - return (UpsampleImpl)super.position(position); - } - @Override public UpsampleImpl getPointer(long i) { - return new UpsampleImpl((Pointer)this).offsetAddress(i); - } + /** Downcast constructor. 
*/ + public UpsampleImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public UpsampleImpl(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_); public UpsampleImpl() { super((Pointer)null); allocate(); } - @SharedPtr private native void allocate(); + @SharedPtr @Name("std::make_shared") private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java index 2db43f0538e..5ff05b8f6ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java @@ -22,6 +22,9 @@ public class UpsampleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UpsampleImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public UpsampleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UpsampleImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java index eecb2aecaa8..8f1f6d01f8a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java @@ -36,11 +36,14 @@ public class ZeroPad2dImpl extends ZeroPad2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ZeroPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); public ZeroPad2dImpl(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding); + @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding); public ZeroPad2dImpl(@Const @ByRef ZeroPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr private native void allocate(@Const @ByRef ZeroPad2dOptions options_); + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef ZeroPad2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java index ac0ea437f5a..1dfbf7415da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java @@ -22,6 +22,9 @@ public class ZeroPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. */ + public ZeroPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index cac335c4717..9a755548443 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -203,6 +203,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../T_TypePtrLong_TOptional.java +// Targeting ../StringViewOptional.java + + +// Targeting ../StringViewVectorOptional.java + + // Targeting ../ExampleVectorOptional.java @@ -356,6 +362,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringVector.java +// Targeting ../StringViewVector.java + + // Targeting ../StringLongVector.java @@ -440,9 +449,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SharedModuleVector.java -// Targeting ../SharedAnyModuleVector.java - - // Targeting ../StringTensorVector.java @@ -1494,64 +1500,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/util/string_view.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if __cpp_lib_string_view -// #include -// #define C10_HAS_STD_STRING_VIEW() 1 -// #define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 0 -// #elif defined(__has_include) -// #if __has_include() -// libc++ 7.0 has experimental/string_view but it's just a #error -// #if !defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 
7000) -// #include -// #endif -// #if __cpp_lib_experimental_string_view -// #define C10_HAS_STD_STRING_VIEW() 0 -// #define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 1 -// #endif -// #endif -// #endif - -// #ifndef C10_HAS_STD_STRING_VIEW -// #define C10_HAS_STD_STRING_VIEW() 0 -// #endif -// #ifndef C10_HAS_STD_EXPERIMENTAL_STRING_VIEW -// #define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 0 -// #endif - -// #if C10_CLANG_HAS_WARNING("-Wdeprecated") -// #endif - -/** - * Reimplementation of std::string_view for C++11. - * Implemented following the interface definition in - * https://en.cppreference.com/w/cpp/string/basic_string_view - * See there for the API documentation. - * - * Difference: We don't have a Traits template parameter because - * std::char_traits isn't constexpr and we'd have to reimplement - * std::char_traits if we wanted to use it with our constexpr basic_string_view. - */ - - - - // namespace c10 - // namespace std - - - // Parsed from c10/util/StringUtil.h // #ifndef C10_UTIL_STRINGUTIL_H_ @@ -1596,7 +1544,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Replace all occurrences of "from" substring to "to" string. // Returns number of replacements -@Namespace("c10") public static native @Cast("size_t") long ReplaceAll(@StdString @ByRef BytePointer s, @ByVal @Cast("c10::string_view*") Pointer from, @ByVal @Cast("c10::string_view*") Pointer to); +@Namespace("c10") public static native @Cast("size_t") long ReplaceAll(@StdString @ByRef BytePointer s, @StringView BytePointer from, @StringView BytePointer to); +@Namespace("c10") public static native @Cast("size_t") long ReplaceAll(@StdString @ByRef BytePointer s, @StringView String from, @StringView String to); // Targeting ../SourceLocation.java @@ -1606,7 +1555,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // unix isprint but insensitive to locale @Namespace("c10") public static native @Cast("bool") boolean isPrint(@Cast("char") byte s); -@Namespace("c10") public static native void printQuotedString(@Cast("std::ostream*") @ByRef Pointer stmt, @ByVal @Cast("const c10::string_view*") Pointer str); +@Namespace("c10") public static native void printQuotedString(@Cast("std::ostream*") @ByRef Pointer stmt, @StringView BytePointer str); +@Namespace("c10") public static native void printQuotedString(@Cast("std::ostream*") @ByRef Pointer stmt, @StringView String str); // namespace c10 @@ -5252,9 +5202,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// Targeting ../MaybeOwnedTraitsGenericImplTensor.java - +/** MaybeOwnedTraits describes how to borrow from T. Here is how we + * can implement borrowing from an arbitrary type T using a raw + * pointer to const: */ /** It is possible to eliminate the extra layer of indirection for * borrows for some types that we control. 
For examples, see @@ -5549,7 +5500,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(@Cast("const char*") BytePointer str, @Cast("size_t") long size); @Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(String str, @Cast("size_t") long size); -@Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(@ByVal @Cast("c10::string_view*") Pointer str); +@Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(@StringView BytePointer str); +@Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(@StringView String str); // namespace util // namespace c10 @@ -5605,10 +5557,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #error "You're running a too old version of Clang. We need Clang 4 or later." // #endif -@Namespace("c10::util::detail") public static native @ByVal @Cast("const c10::string_view*") Pointer extract( - @ByVal @Cast("c10::string_view*") Pointer prefix, - @ByVal @Cast("c10::string_view*") Pointer suffix, - @ByVal @Cast("c10::string_view*") Pointer str); +@Namespace("c10::util::detail") public static native @StringView BytePointer extract( + @StringView BytePointer prefix, + @StringView BytePointer suffix, + @StringView BytePointer str); +@Namespace("c10::util::detail") public static native @StringView String extract( + @StringView String prefix, + @StringView String suffix, + @StringView String str); // #if !defined(__CUDA_ARCH__) // #endif @@ -15168,10 +15124,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { } // namespace at - -// Targeting ../StringView.java - - + // namespace std // Soft limit on the number of callbacks to use; @Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); @@ -15436,7 +15389,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #endif -@Namespace("c10::impl") public static native @Cast("const bool") boolean allowlist_contains(@ByVal @Cast("c10::string_view*") Pointer allowlist, @ByVal @Cast("c10::string_view*") Pointer item); // Forward Declare +@Namespace("c10::impl") public static native @Cast("const bool") boolean allowlist_contains(@StringView BytePointer allowlist, @StringView BytePointer item); +@Namespace("c10::impl") public static native @Cast("const bool") boolean allowlist_contains(@StringView String allowlist, @StringView String item); // Forward Declare /** * In selective build mode returns true/false depending on whether a build @@ -15489,19 +15443,23 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Returns true iff the given op name is on the allowlist // and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer op_name); +@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@StringView BytePointer op_name); +@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@StringView String op_name); // Returns true iff the given schema string is on the allowlist // and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean schema_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer schema); +@Namespace("c10::impl") public static native @Cast("const bool") boolean schema_allowlist_check(@StringView BytePointer schema); 
+@Namespace("c10::impl") public static native @Cast("const bool") boolean schema_allowlist_check(@StringView String schema); // Returns true iff the given custom class name is on the allowlist // and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean custom_class_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer custom_class_name); +@Namespace("c10::impl") public static native @Cast("const bool") boolean custom_class_allowlist_check(@StringView BytePointer custom_class_name); +@Namespace("c10::impl") public static native @Cast("const bool") boolean custom_class_allowlist_check(@StringView String custom_class_name); // schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. // Add this API to pass arbitrary allowlist. -@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_contains_name_in_schema(@ByVal @Cast("c10::string_view*") Pointer allowlist, @ByVal @Cast("c10::string_view*") Pointer schema); +@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_contains_name_in_schema(@StringView BytePointer allowlist, @StringView BytePointer schema); +@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_contains_name_in_schema(@StringView String allowlist, @StringView String schema); // Returns true iff the given dispatch key is on the allowlist // and should be registered. When we turn this on, the list of valid @@ -23225,10 +23183,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv1d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding); @@ -23263,10 +23221,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding); @@ -23301,10 +23259,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding); @@ -25573,18 +25531,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); // aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); // aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); // aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode, @ByRef Tensor out); // aten::div.Scalar(Tensor self, Scalar other) -> Tensor @Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other); // aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); // aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); @@ -25592,9 +25550,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); // aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); // aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode, @ByRef Tensor out); @@ -25635,15 +25593,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other); // aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); // aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByRef Tensor divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode); // aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal StringViewOptional rounding_mode, @ByRef Tensor out); // aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal StringViewOptional rounding_mode); @@ -25810,9 +25768,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); -@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); @@ -27370,14 +27329,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self); // aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self); // aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27407,17 +27366,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27486,17 +27445,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27558,14 +27517,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self); // aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); // aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_hfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_hfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27595,17 +27554,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2.out(Tensor self, int[1]? 
s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @@ -27635,17 +27594,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @@ -27675,14 +27634,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self); // aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self); // aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27712,17 +27671,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27752,17 +27711,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
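Illustrative usage sketch (not part of the generated diff): the hunks above replace the raw c10::optional<c10::string_view> Pointer cast for norm with the typed StringViewOptional helper. The sketch below assumes the presets are on the classpath, that StringViewOptional can be constructed directly from a Java string as the other generated Optional helpers can, and that passing null for a parameter declared with a nullValue default selects that default, as is usual for JavaCPP; the class and method names are made up for the example.

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public final class FftNormSketch {
    // Inverse n-dimensional FFT with "ortho" normalization; s and dim are left at their
    // defaults (None) by passing null, which JavaCPP maps to the declared nullValue.
    static Tensor orthoIfftn(Tensor x) {
        return fft_ifftn(x,
                (LongArrayRefOptional) null,        // s = None
                (LongArrayRefOptional) null,        // dim = None
                new StringViewOptional("ortho"));   // assumed String-based constructor
    }
}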
-@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27824,14 +27783,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self); // aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); // aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ihfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ihfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27861,17 +27820,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2.out(Tensor 
self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @@ -27901,17 +27860,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @@ -27941,14 +27900,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self); // aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); // aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -27978,17 +27937,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -28018,17 +27977,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -28058,14 +28017,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self); // aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); // aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -28095,17 +28054,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -28174,17 +28133,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
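A second hedged sketch (again not from the diff) for the primitive-array overloads shown above, which take s and dim as long[] while norm becomes a StringViewOptional; as before, the String-based StringViewOptional constructor is an assumption, and the class/method names are placeholders.

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.StringViewOptional;
import static org.bytedeco.pytorch.global.torch.fft_rfftn;

public final class RfftnSketch {
    // Real-to-complex FFT over the last two dimensions, zero-padded/cropped to 16x16,
    // using "forward" normalization; s and dim use the long[] overload from the hunk above.
    static Tensor rfftn16(Tensor x) {
        return fft_rfftn(x, new long[] {16, 16}, new long[] {-2, -1},
                new StringViewOptional("forward")); // assumed String-based constructor
    }
}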
-@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -29269,16 +29228,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename); +@Namespace("at") public static native @ByVal Tensor from_file(@StringView BytePointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_file(@StringView BytePointer filename); +@Namespace("at") public static native @ByVal Tensor from_file(@StringView String filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_file(@StringView String filename); // aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor from_file(@StringView BytePointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor from_file(@StringView String filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size); -@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @ByVal @Cast("c10::string_view*") Pointer filename); +@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @StringView BytePointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size); +@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @StringView BytePointer filename); +@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @StringView String filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size); +@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @StringView String filename); // aten::from_file.out(str filename, bool? shared=None, int? 
size=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor from_file_outf(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor from_file_outf(@StringView BytePointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor from_file_outf(@StringView String filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByRef Tensor out); @@ -29618,18 +29583,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer approximate/*="none"*/); @Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String approximate/*="none"*/); // aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer approximate, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor gelu_outf(@Const @ByRef Tensor self, @StringView BytePointer approximate, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor gelu_outf(@Const @ByRef Tensor self, @StringView String approximate, @ByRef Tensor out); // aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self, @StringView BytePointer approximate/*="none"*/); @Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self, @StringView String approximate/*="none"*/); // aten::gelu(Tensor self, *, str approximate='none') -> Tensor -@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self, @StringView BytePointer approximate/*="none"*/); @Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self, @StringView String approximate/*="none"*/); @@ -29659,14 +29628,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @StringView BytePointer approximate/*="none"*/); @Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @StringView String approximate/*="none"*/); // aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer approximate, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor gelu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @StringView BytePointer approximate, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor gelu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @StringView String approximate, @ByRef Tensor grad_input); // aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor -@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @StringView BytePointer approximate/*="none"*/); @Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @StringView String approximate/*="none"*/); @@ -31598,14 +31570,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce); +@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce); // aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor index_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor index_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); // aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, 
@Const @ByRef Tensor source, @StringView BytePointer reduce); +@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @StringView String reduce); @@ -33487,12 +33464,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByRef Tensor out); // aten::linalg_cond.p_str(Tensor self, str p) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p); +@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @StringView BytePointer p); +@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @StringView String p); // aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p); +@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer p); +@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String p); // aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @StringView BytePointer p, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @StringView String p, @ByRef Tensor out); @@ -33660,14 +33640,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) -@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh(@Const @ByRef Tensor self, @StringView BytePointer UPLO/*="L"*/); @Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh(@Const @ByRef Tensor self, @StringView String UPLO/*="L"*/); // aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors) -@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self, @StringView BytePointer UPLO/*="L"*/); @Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self, @StringView String UPLO/*="L"*/); // aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer UPLO, @ByRef Tensor eigvals, @ByRef Tensor eigvecs); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_outf(@Const @ByRef Tensor self, @StringView BytePointer UPLO, @ByRef Tensor eigvals, @ByRef Tensor eigvecs); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_outf(@Const @ByRef Tensor self, @StringView String UPLO, @ByRef Tensor eigvals, @ByRef Tensor eigvecs); @@ -33732,14 +33715,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self, @StringView BytePointer UPLO/*="L"*/); @Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self, @StringView String UPLO/*="L"*/); // aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer UPLO/*="L"*/); @Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String UPLO/*="L"*/); // aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer UPLO, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_outf(@Const @ByRef Tensor self, @StringView BytePointer UPLO, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_outf(@Const @ByRef Tensor self, @StringView String UPLO, @ByRef Tensor out); @@ -33987,14 +33973,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional driver); @Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b); // aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional driver); @Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b); // aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) 
singular_values) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal DoubleOptional rcond, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal DoubleOptional rcond, @ByVal StringViewOptional driver, @ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values); @@ -34255,17 +34241,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); // aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); @@ -34437,9 +34423,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord); -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @StringView BytePointer ord); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @StringView String ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @StringView 
String ord); // aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @@ -34450,12 +34437,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); // aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord); -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer ord); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord); // aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); @@ -34552,14 +34540,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R) -@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"reduced\")") @Cast("c10::string_view*") Pointer mode); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr(@Const @ByRef Tensor A, @StringView BytePointer mode/*="reduced"*/); @Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr(@Const @ByRef Tensor A); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr(@Const @ByRef Tensor A, @StringView String mode/*="reduced"*/); // aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) -@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"reduced\")") @Cast("c10::string_view*") Pointer mode); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A, @StringView BytePointer mode/*="reduced"*/); @Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A, @StringView String mode/*="reduced"*/); // aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) -@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::string_view*") Pointer mode, @ByRef Tensor Q, @ByRef Tensor R); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_outf(@Const @ByRef Tensor A, @StringView BytePointer mode, @ByRef Tensor Q, @ByRef Tensor R); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_outf(@Const @ByRef Tensor A, @StringView String mode, @ByRef Tensor Q, @ByRef Tensor R); @@ -34735,14 +34726,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? 
driver=None) -> (Tensor U, Tensor S, Tensor Vh) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional driver); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd(@Const @ByRef Tensor A); // aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional driver); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A); // aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_outf(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_outf(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices, @ByVal StringViewOptional driver, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh); @@ -34772,14 +34763,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional driver); @Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A); // aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional driver); @Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A); // aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_svdvals_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_svdvals_outf(@Const @ByRef Tensor A, @ByVal StringViewOptional driver, @ByRef Tensor out); @@ -37193,7 +37184,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); // aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal @Cast("c10::string_view*") Pointer indexing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @StringView BytePointer indexing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @StringView String indexing); @@ -39200,24 +39192,30 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q); +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q); +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView BytePointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView String interpolation, @ByRef Tensor out); // aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q); +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q); +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView BytePointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView String interpolation, @ByRef Tensor out); @@ -41319,15 +41317,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @StringView BytePointer mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); @Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad); -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @StringView String mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); @Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? 
value=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @StringView BytePointer mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); @Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad); +@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @StringView String mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); @@ -42246,24 +42245,30 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q); +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q); +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView BytePointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView String interpolation, @ByRef Tensor out); // aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q); +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView BytePointer interpolation/*="linear"*/); @Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q); +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @StringView String interpolation/*="linear"*/); // aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView BytePointer interpolation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @StringView String interpolation, @ByRef Tensor out); @@ -45674,20 +45679,26 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByRef Tensor out); // aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); // aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); // aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce, @ByRef Tensor out); // aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView BytePointer reduce); +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView String reduce); // aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView BytePointer reduce); +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView String reduce); // aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView BytePointer reduce, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @StringView String reduce, @ByRef Tensor out); // aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor @Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); @@ -45761,14 +45772,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); +@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); // aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce); // aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView BytePointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @StringView String reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); @@ -45798,24 +45814,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); @Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self); // aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); @Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self); // aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal @Cast("c10::optional*") Pointer side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal StringViewOptional side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); // aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); @Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self); // aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); @Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self); // aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal @Cast("c10::optional*") Pointer side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal StringViewOptional side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); @@ -45845,14 +45861,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? 
initial=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); -@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @StringView BytePointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); +@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @StringView BytePointer reduce); +@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @StringView String reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); +@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @StringView String reduce); // aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); -@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); +@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @StringView BytePointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); +@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @StringView BytePointer reduce); +@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @StringView String reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); +@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @StringView String reduce); // aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor segment_reduce_outf(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef TensorOptional lengths, @Const @ByRef TensorOptional indices, @Const @ByRef TensorOptional offsets, @Cast("int64_t") long axis, @Cast("bool") boolean unsafe, @Const @ByRef ScalarOptional initial, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor segment_reduce_outf(@Const @ByRef Tensor data, @StringView BytePointer reduce, @Const @ByRef TensorOptional lengths, @Const @ByRef TensorOptional indices, @Const @ByRef TensorOptional offsets, @Cast("int64_t") long axis, @Cast("bool") boolean unsafe, @Const @ByRef ScalarOptional initial, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor segment_reduce_outf(@Const @ByRef Tensor data, @StringView String reduce, @Const @ByRef TensorOptional lengths, @Const @ByRef TensorOptional indices, @Const @ByRef TensorOptional offsets, @Cast("int64_t") long axis, @Cast("bool") boolean unsafe, @Const @ByRef ScalarOptional initial, @ByRef Tensor out); @@ -50878,7 +50899,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal LongOptional hop_length, @ByVal LongOptional win_length, @Const @ByRef TensorOptional window, @Cast("bool") boolean normalized, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); // aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @ByVal(nullValue = "c10::string_view(\"reflect\")") @Cast("c10::string_view*") Pointer pad_mode, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); +@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @StringView BytePointer pad_mode/*="reflect"*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); +@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @StringView String pad_mode/*="reflect"*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); @@ -60179,8 +60201,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); @Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); -@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@ByVal @Cast("c10::string_view*") Pointer filename); +@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@StringView BytePointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("from_file") Tensor 
torch_from_file(@StringView BytePointer filename); +@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@StringView String filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@StringView String filename); @Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length); @Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @@ -63174,7 +63198,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft( @Const @ByRef Tensor self); @@ -63192,7 +63216,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft( @Const @ByRef Tensor self); @@ -63210,14 +63234,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self, @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.fft2 * See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2. 
@@ -63233,14 +63257,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional fast Fourier transform over given dimensions. * See https://pytorch.org/docs/master/fft.html#torch.fft.fftn. @@ -63256,14 +63280,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor fftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional fast Fourier transform over given dimensions. * See https://pytorch.org/docs/master/fft.html#torch.fft.ifftn. 
@@ -63279,14 +63303,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ifftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the 1 dimensional FFT of real input with onesided Hermitian output. * See https://pytorch.org/docs/master/fft.html#torch.fft.rfft. @@ -63304,7 +63328,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft( @Const @ByRef Tensor self); @@ -63325,7 +63349,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft( @Const @ByRef Tensor self); @@ -63343,14 +63367,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.rfft2. * See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2. 
@@ -63366,14 +63390,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional FFT of real input with onesided Hermitian output. * See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn @@ -63389,14 +63413,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor rfftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.rfftn. * See https://pytorch.org/docs/master/fft.html#torch.fft.irfftn. 
@@ -63413,14 +63437,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor irfftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the 1 dimensional FFT of a onesided Hermitian signal * @@ -63441,7 +63465,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft( @Const @ByRef Tensor self); @@ -63463,7 +63487,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft( @Const @ByRef Tensor self); @@ -63485,14 +63509,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the 2-dimensional IFFT of a real input signal. 
* @@ -63513,14 +63537,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N-dimensional FFT of a Hermitian symmetric input signal. * @@ -63540,14 +63564,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N-dimensional IFFT of a real input signal. * @@ -63568,14 +63592,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the discrete Fourier Transform sample frequencies for a signal of * size n. 
@@ -63716,20 +63740,33 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T eigh( @Const @ByRef Tensor self, - @ByVal @Cast("c10::string_view*") Pointer uplo); + @StringView BytePointer uplo); +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T eigh( + @Const @ByRef Tensor self, + @StringView String uplo); @Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer eigh_out( @ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self, - @ByVal @Cast("c10::string_view*") Pointer uplo); + @StringView BytePointer uplo); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer eigh_out( + @ByRef Tensor eigvals, + @ByRef Tensor eigvecs, + @Const @ByRef Tensor self, + @StringView String uplo); -@Namespace("torch::linalg::detail") public static native @ByVal Tensor eigvalsh(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer uplo); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor eigvalsh(@Const @ByRef Tensor self, @StringView BytePointer uplo); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor eigvalsh(@Const @ByRef Tensor self, @StringView String uplo); @Namespace("torch::linalg::detail") public static native @ByRef Tensor eigvalsh_out( @ByRef Tensor result, @Const @ByRef Tensor self, - @ByVal @Cast("c10::string_view*") Pointer uplo); + @StringView BytePointer uplo); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor eigvalsh_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @StringView String uplo); @Namespace("torch::linalg::detail") public static native @ByVal Tensor householder_product(@Const @ByRef Tensor input, @Const @ByRef Tensor tau); @@ -63763,7 +63800,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal DoubleOptional cond, - @ByVal @Cast("c10::optional*") Pointer driver); + @ByVal StringViewOptional driver); @Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( @Const @ByRef Tensor self, @@ -63780,13 +63817,13 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( @Const @ByRef Tensor self, - @ByVal @Cast("c10::string_view*") Pointer ord, + @StringView BytePointer ord, @ByVal LongArrayRefOptional opt_dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional opt_dtype); @Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( @Const @ByRef Tensor self, - @ByVal @Cast("c10::string_view*") Pointer ord, + @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional opt_dtype); @@ -63809,14 +63846,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByRef Tensor norm_out( @ByRef Tensor result, @Const @ByRef Tensor self, - @ByVal @Cast("c10::string_view*") Pointer ord, + @StringView BytePointer ord, @ByVal LongArrayRefOptional opt_dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional opt_dtype); @Namespace("torch::linalg::detail") public static native @ByRef Tensor norm_out( @ByRef Tensor result, @Const @ByRef Tensor 
self, - @ByVal @Cast("c10::string_view*") Pointer ord, + @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional opt_dtype); @@ -63966,13 +64003,21 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T qr( @Const @ByRef Tensor input, - @ByVal @Cast("c10::string_view*") Pointer mode); + @StringView BytePointer mode); +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T qr( + @Const @ByRef Tensor input, + @StringView String mode); @Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer qr_out( @ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor input, - @ByVal @Cast("c10::string_view*") Pointer mode); + @StringView BytePointer mode); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer qr_out( + @ByRef Tensor Q, + @ByRef Tensor R, + @Const @ByRef Tensor input, + @StringView String mode); @Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T solve_ex( @Const @ByRef Tensor input, @@ -64014,7 +64059,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensorTensor_T svd( @Const @ByRef Tensor input, @Cast("bool") boolean full_matrices, - @ByVal @Cast("c10::optional*") Pointer driver); + @ByVal StringViewOptional driver); @Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer svd_out( @ByRef Tensor U, @@ -64022,16 +64067,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByRef Tensor Vh, @Const @ByRef Tensor input, @Cast("bool") boolean full_matrices, - @ByVal @Cast("c10::optional*") Pointer driver); + @ByVal StringViewOptional driver); @Namespace("torch::linalg::detail") public static native @ByVal Tensor svdvals( @Const @ByRef Tensor input, - @ByVal @Cast("c10::optional*") Pointer driver); + @ByVal StringViewOptional driver); @Namespace("torch::linalg::detail") public static native @ByRef Tensor svdvals_out( @ByRef Tensor result, @Const @ByRef Tensor input, - @ByVal @Cast("c10::optional*") Pointer driver); + @ByVal StringViewOptional driver); @Namespace("torch::linalg::detail") public static native @ByVal Tensor tensorinv(@Const @ByRef Tensor self, @Cast("int64_t") long ind); @@ -64145,34 +64190,6 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByRef Tensor U, @Const @ByRef Tensor self); -@Namespace("torch::linalg") public static native @ByVal Tensor norm( - @Const @ByRef Tensor self, - @StdString BytePointer ord, - @ByVal LongArrayRefOptional opt_dim, - @Cast("bool") boolean keepdim, - @ByVal ScalarTypeOptional opt_dtype); -@Namespace("torch::linalg") public static native @ByVal Tensor norm( - @Const @ByRef Tensor self, - @StdString String ord, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, - @Cast("bool") boolean keepdim, - @ByVal ScalarTypeOptional opt_dtype); - -@Namespace("torch::linalg") public static native @ByRef Tensor norm_out( - @ByRef Tensor result, - @Const @ByRef Tensor self, - @StdString BytePointer ord, - @ByVal LongArrayRefOptional opt_dim, - @Cast("bool") boolean keepdim, - @ByVal ScalarTypeOptional opt_dtype); -@Namespace("torch::linalg") public 
static native @ByRef Tensor norm_out( - @ByRef Tensor result, - @Const @ByRef Tensor self, - @StdString String ord, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, - @Cast("bool") boolean keepdim, - @ByVal ScalarTypeOptional opt_dtype); - /** See https://pytorch.org/docs/master/linalg.html#torch.linalg.vector_norm */ /** See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_norm */ @@ -67475,8 +67492,6 @@ scalar_t sf(scalar_t x, scalar_t y) // ============================================================================ // #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @StdString BytePointer approximate); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @StdString String approximate); // namespace detail // #endif /* DOXYGEN_SHOULD_SKIP_THIS */ @@ -68694,8 +68709,8 @@ scalar_t sf(scalar_t x, scalar_t y) @Override public String toString() { return intern().name(); } } -@Namespace("at") public static native @ByVal @Cast("c10::string_view*") Pointer padding_mode_string(padding_mode m); -@Namespace("at") public static native @ByVal @Cast("c10::string_view*") Pointer padding_mode_string(@Cast("at::padding_mode") int m); +@Namespace("at") public static native @StringView BytePointer padding_mode_string(padding_mode m); +@Namespace("at") public static native @StringView String padding_mode_string(@Cast("at::padding_mode") int m); // namespace at diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/StringView.java b/pytorch/src/main/java/org/bytedeco/pytorch/StringView.java new file mode 100644 index 00000000000..f4343ac5bf8 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/StringView.java @@ -0,0 +1,12 @@ +package org.bytedeco.pytorch; + +import org.bytedeco.javacpp.annotation.Adapter; +import org.bytedeco.javacpp.annotation.Cast; + +import java.lang.annotation.*; + +@Documented @Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD, ElementType.PARAMETER}) +@Cast("c10::string_view&") @Adapter("StringViewAdapter") +public @interface StringView { +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index e2ff9467348..e0b246b634a 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -66,7 +66,9 @@ // For inclusion in JNI only, not parsed (compiler needs some complete definitions) "torch/csrc/jit/runtime/instruction.h", - "torch/csrc/jit/serialization/source_range_serialization.h" + "torch/csrc/jit/serialization/source_range_serialization.h", + + "pytorch_adapters.h" }, link = {"c10", "torch_cpu", "torch"}, preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm"} @@ -209,7 +211,7 @@ public void mapModule(InfoMap infoMap, String name, String base, String baseBase } infoMap.put(new Info("torch::nn::" + name + "Impl")) // Ensure qualified name is in Info when Cloneable inheritance is parsed (and before class XImpl is finished parsing) - .put(new Info("torch::nn::" + name + "Impl::" + name + "Impl").annotations("@SharedPtr")) + .put(new Info("torch::nn::" + name + "Impl::" + name + "Impl").annotations("@SharedPtr", "@Name(\"std::make_shared\")")) .put(new Info("torch::nn::Cloneable").pointerTypes(name + "ImplCloneable").purify()) .put(new 
Info("torch::nn::ModuleHolder").skip()) .put(new Info("torch::nn::" + name).skip()); @@ -218,7 +220,7 @@ public void mapModule(InfoMap infoMap, String name, String base, String baseBase anyModuleConstructors += "public AnyModule(" + name + "Impl module) { super((Pointer)null); allocate(module); }\n" + // We need a @Cast because AnyModule constructor is explicit - "@SharedPtr private native void allocate(@SharedPtr @Cast({\"\", \"std::shared_ptr\"}) " + name + "Impl module);\n"; + "private native void allocate(@SharedPtr @Cast({\"\", \"std::shared_ptr\"}) " + name + "Impl module);\n"; infoMap.put(new Info("torch::nn::SequentialImpl::push_back").javaNames("push_back")); } } @@ -340,7 +342,6 @@ public void map(InfoMap infoMap) { .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorMaybeOwned").pointerTypes("TensorMaybeOwned")) .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorBaseMaybeOwned").pointerTypes("TensorBaseMaybeOwned")) .put(new Info("c10::MaybeOwnedTraits").pointerTypes("MaybeOwnedTraitsTensor")) - .put(new Info("c10::MaybeOwnedTraitsGenericImpl >").pointerTypes("MaybeOwnedTraitsGenericImplTensor")) .put(new Info("at::InferExpandGeometryResult").pointerTypes("DimVectorInferExpandGeometryResult")) .put(new Info("at::namedinference::TensorName").valueTypes("@Cast({\"\", \"at::namedinference::TensorName&&\"}) @StdMove TensorName").pointerTypes("TensorName")) .put(new Info("c10::remove_symint::type").valueTypes("long")) @@ -442,6 +443,8 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional >").pointerTypes("T_StringSizeTSizeT_TOptional").define()) .put(new Info("torch::optional >").pointerTypes("T_TensorTensor_TOptional").define()) .put(new Info("c10::optional >", "c10::optional >").pointerTypes("T_TypePtrLong_TOptional").cast().define()) + .put(new Info("c10::optional").pointerTypes("StringViewOptional").define()) + .put(new Info("c10::optional >").pointerTypes("StringViewVectorOptional").define()) ; @@ -595,6 +598,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector").cast().pointerTypes("DoubleVector").define()) .put(new Info("std::vector").cast().pointerTypes("SizeTVector").define()) .put(new Info("std::vector").pointerTypes("StringVector").define()) + .put(new Info("std::vector").pointerTypes("StringViewVector").define()) .put(new Info("std::vector >").pointerTypes("StringLongVector").define()) .put(new Info("const std::vector >", "std::vector >").pointerTypes("RecordFunctionCallbackHandleVector").define()) @@ -643,8 +647,6 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector::iterator").pointerTypes("AnyModuleVector.Iterator")) .put(new Info("std::vector >").pointerTypes("SharedModuleVector").define()) .put(new Info("std::vector >::iterator").pointerTypes("SharedModuleVector.Iterator")) - .put(new Info("std::vector >").pointerTypes("SharedAnyModuleVector").define()) - .put(new Info("std::vector >::iterator").pointerTypes("SharedAnyModuleVector.Iterator")) .put(new Info("std::vector >").pointerTypes("StringTensorVector").define()) .put(new Info("std::vector >").pointerTypes("StringModuleVector").define()) .put(new Info("std::vector >").pointerTypes("StringAnyModuleVector").define()) @@ -1625,16 +1627,17 @@ public void map(InfoMap infoMap) { //// Classes handled with @SharedPtr + // Annotating the constructor is normally needed for all classes for which + // at least an API call takes a shared pointer of this class AND + // if instances of this class can 
be created from a Java constructor. for (PointerInfo pi : new PointerInfo[]{ new PointerInfo("torch::jit::Graph"), new PointerInfo("torch::jit::Operator"), new PointerInfo("torch::jit::Resolver"), - new PointerInfo("at::Tensor"), new PointerInfo("torch::jit::tensorexpr::analysis::AccessInfo"), new PointerInfo("c10::ClassType"), new PointerInfo("c10::TensorType").otherCppNames("c10::TensorTypePtr", "at::TensorTypePtr", "torch::TensorTypePtr"), new PointerInfo("torch::autograd::FunctionPreHook"), - new PointerInfo("torch::nn::AnyModule"), new PointerInfo("torch::nn::Module"), new PointerInfo("const at::functorch::FuncTorchTLSBase"), new PointerInfo("const torch::jit::CompilationUnit"), @@ -1652,7 +1655,8 @@ public void map(InfoMap infoMap) { // Also annotate constructor of target class to ensure only one shared_ptr exists for each instance String n = pi.argumentNames[0].substring(pi.argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const - infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr")); + String n2 = n.equals("torch::nn::Module") ? "JavaCPP_torch_0003a_0003ann_0003a_0003aModule" : n; + infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr", "@Name(\"std::make_shared<" + n2 + ">\")")); } @@ -1887,6 +1891,10 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("c10::cast_and_store<" + t[0] + ">").javaNames("cast_and_store_from_" + t[1])); } + + //// c10::string_view + infoMap.put(new Info("c10::basic_string_view", "c10::string_view").annotations("@StringView").valueTypes("BytePointer", "String")); + // Registries. // Skipped them for now. Much burden with variadic args and creator function pointers. // We cannot map ThreadPoolRegistry because it takes 3 arguments in the variadic Args Registry template arguments @@ -2115,7 +2123,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::nn::functions::CrossMapLRN2d", "torch::profiler::impl::HashCombine", - "torch::autograd::_jvp_fn_t", "torch::autograd::profiler::post_process_t" + "torch::autograd::_jvp_fn_t", "torch::autograd::profiler::post_process_t", + "at::StringView" // Confusion with string_view and @StringView, and doesn't seem to be of any use in API ).skip()) ; @@ -2150,7 +2159,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset //// Classes kept but passed as generic pointer - .put(new Info("c10::intrusive_ptr_target", "c10::nullopt", "c10::nullopt_t", "c10::string_view", "c10::impl::PyObjectSlot", + .put(new Info("c10::intrusive_ptr_target", "c10::nullopt", "c10::nullopt_t", "c10::impl::PyObjectSlot", "_object", "PyObject", "std::function", "THPObjectPtr", "pyobj_list", "std::chrono::milliseconds", "std::exception_ptr", "std::type_info", "std::pair", "std::stack >", "torch::autograd::utils::DelayWarningHandler", @@ -2160,7 +2169,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "std::shared_ptr", "caffe2::serialize::PyTorchStreamWriter", "c10::detail::DictImpl::dict_map_type::iterator", "std::iterator >", - "c10::optional", "c10::optional", "c10::optional >", "c10::optional", + "c10::optional", "c10::optional", "c10::intrusive_ptr", "c10::intrusive_ptr", "c10::intrusive_ptr", "c10::ArrayRef >", "torch::jit::DetachedBuffer::UniqueDetachedBuffer", "c10::optional", diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/include/pytorch_adapters.h 
b/pytorch/src/main/resources/org/bytedeco/pytorch/include/pytorch_adapters.h
new file mode 100644
index 00000000000..ab69f0072d7
--- /dev/null
+++ b/pytorch/src/main/resources/org/bytedeco/pytorch/include/pytorch_adapters.h
@@ -0,0 +1,27 @@
+class JavaCPP_hidden StringViewAdapter final {
+ public:
+  using SizeT = typename std::basic_string<char>::size_type;
+  StringViewAdapter(const c10::string_view &sv) : ptr(sv.data()), size(sv.size()), svRef((c10::string_view &)sv) {}
+  StringViewAdapter(      c10::string_view &sv) : ptr(sv.data()), size(sv.size()), svRef(sv) {}
+
+  StringViewAdapter(const unsigned char *p, SizeT s, void *o) : ptr((const char *) p), size(s > 0 ? s : strlen(ptr)), sv(ptr, size), svRef(sv) { }
+  StringViewAdapter(      unsigned char *p, SizeT s, void *o) : ptr((const char *) p), size(s > 0 ? s : strlen(ptr)), sv(ptr, size), svRef(sv) { }
+  StringViewAdapter(const   signed char *p, SizeT s, void *o) : ptr((const char *) p), size(s > 0 ? s : strlen(ptr)), sv(ptr, size), svRef(sv) { }
+  StringViewAdapter(        signed char *p, SizeT s, void *o) : ptr((const char *) p), size(s > 0 ? s : strlen(ptr)), sv(ptr, size), svRef(sv) { }
+  StringViewAdapter(const          char *p, SizeT s, void *o) : ptr(                p), size(s > 0 ? s : strlen(ptr)), sv(ptr, size), svRef(sv) { }
+  StringViewAdapter(               char *p, SizeT s, void *o) : ptr((const char *) p), size(s > 0 ? s : strlen(ptr)), sv(ptr, size), svRef(sv) { }
+
+  static void deallocate(void *owner) { }
+
+  operator signed char *() { return (signed char *) ptr; } // Used when a string_view argument is passed as BytePointer
+  operator const char *()  { return ptr; }                 // Used when a string_view is returned by a function (as String)
+
+  operator c10::string_view&() { return svRef; }
+  operator c10::string_view*() { return &svRef; }
+
+  const char *ptr;
+  SizeT size;
+  c10::string_view sv;
+  c10::string_view &svRef;
+  void *owner = NULL;
+};
\ No newline at end of file
diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h
index 01cddd92ffe..ec3238af8d9 100644
--- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h
+++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h
@@ -15,7 +15,7 @@
 // #include "c10/util/string_utils.h" // Android only
 // #include "c10/util/C++17.h"
 #include "c10/util/reverse_iterator.h"
-#include "c10/util/string_view.h"
+// #include "c10/util/string_view.h" // Not mapped. Using custom adapter instead.
 #include "c10/util/StringUtil.h"
 #include "c10/util/in_place.h"
 // #include "c10/util/variant.h" // Not parseable and incompatible with declaring c10::variant as basic container
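
A minimal usage sketch, not part of the patch itself, of how the @StringView/StringViewAdapter mapping above looks from the Java side. The class and method names here are illustrative only, and suitably shaped input tensors are assumed to be supplied by the caller; the sketch relies only on the scatter(Tensor, long, Tensor, Tensor, String) overload generated by this change.

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.scatter;

public class StringViewUsageSketch {
    // 'self', 'index' (int64 dtype) and 'src' are assumed to be compatible tensors.
    static Tensor scatterAdd(Tensor self, Tensor index, Tensor src) {
        // aten::scatter.reduce: the "str reduce" argument can now be passed as a plain
        // Java String ("add" or "multiply") instead of a @Cast("c10::string_view*") Pointer.
        return scatter(self, 0, index, src, "add");
    }
}

The BytePointer overloads remain available when a native string should be reused without copying, and optional str arguments (for example the side parameter of searchsorted) now map to StringViewOptional rather than a raw Pointer.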