Adapted to some API Compat complaints.
NiklasGustafsson committed Nov 11, 2024
1 parent ffdcd89 commit 3e77e9f
Showing 16 changed files with 164 additions and 13 deletions.
22 changes: 20 additions & 2 deletions src/TorchSharp/NN/Activation/GELU.cs
@@ -32,11 +32,19 @@ public static partial class torch
{
public static partial class nn
{
/// <summary>
/// Gaussian Error Linear Units
/// </summary>
public static GELU GELU()
{
return new GELU(false);
}

/// <summary>
/// Gaussian Error Linear Units
/// </summary>
/// <param name="inplace">Do the operation in-place. Default: False</param>
public static GELU GELU(bool inplace = false)
public static GELU GELU(bool inplace)
{
return new GELU(inplace);
}
@@ -48,10 +56,20 @@ public static partial class functional
/// </summary>
/// <param name="x">The input tensor</param>
/// <param name="inplace">Do the operation in-place. Default: False</param>
public static Tensor gelu(Tensor x, bool inplace = false)
public static Tensor gelu(Tensor x, bool inplace)
{
return inplace ? x.gelu_().alias() : x.gelu();
}

/// <summary>
/// Gaussian Error Linear Units
/// </summary>
/// <param name="x">The input tensor</param>
/// <remarks>The defaulting of 'inplace' to 'false' is implemented as an overload to avoid a breaking change.</remarks>
public static Tensor gelu(Tensor x)
{
return gelu(x, false);
}
}
}
}
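The remark above points at the underlying issue: C# default argument values are substituted into the caller when it is compiled, so if an earlier release exposed a parameterless GELU(), replacing it with GELU(bool inplace = false) removes the zero-argument method from the assembly and breaks binaries built against that release, which is what the API Compat check complains about. Splitting the default into explicit overloads restores the old signature while keeping source compatibility. A minimal usage sketch of how the split overloads are expected to resolve (tensor shapes are illustrative; it assumes a 'using static TorchSharp.torch;' directive):

using TorchSharp;
using static TorchSharp.torch;

internal static class GeluCompatSketch
{
    internal static void Run()
    {
        var x = randn(4, 8);

        // Old parameterless call sites keep binding to the restored zero-argument overload.
        var m = nn.GELU();
        var y1 = m.forward(x);

        // Callers that pass 'inplace' explicitly bind to the bool overload.
        var y2 = nn.functional.gelu(x, true);

        // The new one-argument functional overload supplies the old default of 'false'.
        var y3 = nn.functional.gelu(x);
    }
}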
9 changes: 9 additions & 0 deletions src/TorchSharp/NN/Activation/Hardshrink.cs
@@ -57,6 +57,15 @@ public static Tensor hardshrink(Tensor x, double lambda = 0.5)
if (result == IntPtr.Zero) { torch.CheckForErrors(); }
return new Tensor(result);
}

/// <summary>
/// Hardshrink
/// </summary>
/// <param name="x">The input tensor</param>
/// <param name="lambda">The λ value for the Hardshrink formulation. Default: 0.5</param>
/// <remarks>Only here for backward compatibility.</remarks>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Hardshrink(Tensor x, double lambda = 0.5) => hardshrink(x, lambda);
}
}
}
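The capitalized Hardshrink wrapper is the first of several [Obsolete]-tagged shims in this commit that keep the old Pascal-case functional names callable while steering users toward the PyTorch-style lower-case ones. A small sketch of what existing callers see (tensor shapes are illustrative; same 'using static TorchSharp.torch;' assumption as above):

using TorchSharp;
using static TorchSharp.torch;

internal static class HardshrinkCompatSketch
{
    internal static void Run()
    {
        var x = randn(10);

        // PyTorch-style lower-case name: no warning.
        var y = nn.functional.hardshrink(x, 0.5);

        // Old Pascal-case name still compiles and forwards to the same implementation,
        // but emits warning CS0618 because the second [Obsolete] argument is 'false'.
        var z = nn.functional.Hardshrink(x, 0.5);
    }
}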
11 changes: 11 additions & 0 deletions src/TorchSharp/NN/Activation/Hardtanh.cs
@@ -67,6 +67,17 @@ public static Tensor hardtanh(Tensor x, double min_val = -1.0, double max_val =
{
return inplace ? x.hardtanh_(min_val, max_val).alias() : x.hardtanh(min_val, max_val);
}

/// <summary>
/// Hardtanh
/// </summary>
/// <param name="x">The input tensor</param>
/// <param name="min_val">Minimum value of the linear region range.</param>
/// <param name="max_val">Maximum value of the linear region range.</param>
/// <param name="inplace">Do the operation in-place</param>
/// <remarks>Only here for backward compatibility.</remarks>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Hardtanh(Tensor x, double min_val = -1.0, double max_val = 1.0, bool inplace = false) => hardtanh(x, min_val, max_val, inplace);
}
}
}
17 changes: 16 additions & 1 deletion src/TorchSharp/NN/Activation/Mish.cs
@@ -32,11 +32,19 @@ public static partial class torch
{
public static partial class nn
{
/// <summary>
/// A Self Regularized Non-Monotonic Neural Activation Function.
/// </summary>
public static Mish Mish()
{
return new Mish(false);
}

/// <summary>
/// A Self Regularized Non-Monotonic Neural Activation Function.
/// </summary>
/// <param name="inplace">Do the operation in-place. Default: False</param>
public static Mish Mish(bool inplace = false)
public static Mish Mish(bool inplace)
{
return new Mish(inplace);
}
@@ -54,6 +62,13 @@ public static Tensor mish(Tensor x, bool inplace = false)
using var t2 = t1.tanh();
return inplace ? x.mul_(t2).alias() : x.mul(t2);
}

/// <summary>
/// A Self Regularized Non-Monotonic Neural Activation Function.
/// </summary>
/// <param name="x">The input tensor</param>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Mish(Tensor x) => mish(x, false);
}
}
}
10 changes: 9 additions & 1 deletion src/TorchSharp/NN/Activation/SiLU.cs
@@ -39,7 +39,15 @@ public static partial class nn
/// <summary>
/// Sigmoid-Weighted Linear Unit
/// </summary>
public static SiLU SiLU(bool inplace = false)
public static SiLU SiLU()
{
return new SiLU(false);
}

/// <summary>
/// Sigmoid-Weighted Linear Unit
/// </summary>
public static SiLU SiLU(bool inplace)
{
return new SiLU(inplace);
}
21 changes: 20 additions & 1 deletion src/TorchSharp/NN/Activation/Sigmoid.cs
@@ -31,6 +31,15 @@ public static partial class torch
{
public static partial class nn
{
/// <summary>
/// Sigmoid activation
/// </summary>
/// <returns></returns>
public static Sigmoid Sigmoid()
{
return new Sigmoid(false);
}

/// <summary>
/// Sigmoid activation
/// </summary>
@@ -49,10 +58,20 @@ public static partial class functional
/// <param name="x">The input tensor</param>
/// <param name="inplace">Do the operation in-place. Default: False</param>
/// <returns></returns>
public static Tensor sigmoid(Tensor x, bool inplace = false)
public static Tensor sigmoid(Tensor x, bool inplace)
{
return inplace ? x.sigmoid_().alias() : x.sigmoid();
}

/// <summary>
/// Sigmoid activation
/// </summary>
/// <param name="x">The input tensor</param>
/// <remarks>The defaulting of 'inplace' to 'false' is implemented as an overload to avoid a breaking change.</remarks>
public static Tensor sigmoid(Tensor x)
{
return sigmoid(x, false);
}
}
}
}
10 changes: 5 additions & 5 deletions src/TorchSharp/NN/Activation/Softplus.cs
@@ -14,7 +14,7 @@ namespace Modules
/// </summary>
public sealed class Softplus : ParameterLessModule<Tensor, Tensor>
{
internal Softplus(int beta = 1, int threshold = 20) : base(nameof(Softplus))
internal Softplus(double beta = 1, double threshold = 20) : base(nameof(Softplus))
{
this.beta = beta;
this.threshold = threshold;
@@ -25,8 +25,8 @@ public override Tensor forward(Tensor tensor)
return torch.nn.functional.softplus(tensor, beta, threshold);
}

public int beta {get; set;}
public int threshold {get; set;}
public double beta {get; set;}
public double threshold {get; set;}
}
}

@@ -40,7 +40,7 @@ public static partial class nn
/// <param name="beta">The β value for the Softplus formulation.</param>
/// <param name="threshold">Values above this revert to a linear function</param>
/// <returns></returns>
public static Softplus Softplus(int beta = 1, int threshold = 20)
public static Softplus Softplus(double beta = 1, double threshold = 20)
{
return new Softplus(beta, threshold);
}
@@ -54,7 +54,7 @@ public static partial class functional
/// <param name="beta">The β value for the Softplus formulation.</param>
/// <param name="threshold">Values above this revert to a linear function</param>
/// <returns></returns>
public static Tensor softplus(Tensor x, int beta = 1, int threshold = 20)
public static Tensor softplus(Tensor x, double beta = 1, double threshold = 20)
{
return x.softplus(beta, threshold);
}
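Widening beta and threshold from int to double lines the module and functional signatures up with the Tensor.softplus change at the end of this commit and makes fractional values expressible. A short sketch under the same assumptions as above:

using TorchSharp;
using static TorchSharp.torch;

internal static class SoftplusSketch
{
    internal static void Run()
    {
        var x = randn(5);

        // Fractional values like beta = 0.5 could not be passed through the old 'int' parameters.
        var y = nn.functional.softplus(x, beta: 0.5, threshold: 15.0);

        var mod = nn.Softplus(beta: 0.5);
        var z = mod.forward(x);

        // The Tensor-level overload is widened the same way.
        var w = x.softplus(beta: 2.0);
    }
}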
8 changes: 8 additions & 0 deletions src/TorchSharp/NN/Activation/Softshrink.cs
@@ -57,6 +57,14 @@ public static Tensor softshrink(Tensor x, double lambda = 0.5)
if (result == IntPtr.Zero) { torch.CheckForErrors(); }
return new Tensor(result);
}

/// <summary>
/// Softshrink
/// </summary>
/// <param name="x">The input tensor</param>
/// <param name="lambda">The λ value for the Softshrink formulation. Default: 0.5</param>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Softshrink(Tensor x, double lambda = 0.5) => softshrink(x, lambda);
}
}
}
7 changes: 7 additions & 0 deletions src/TorchSharp/NN/Activation/Softsign.cs
@@ -54,6 +54,13 @@ public static Tensor softsign(Tensor x, bool inplace = false)
using var y = 1 + abs;
return inplace ? x.div_(y).alias() : x.div(y);
}

/// <summary>
/// Softsign
/// </summary>
/// <param name="x">The input tensor</param>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Softsign(Tensor x) => softsign(x, false);
}
}
}
9 changes: 9 additions & 0 deletions src/TorchSharp/NN/Activation/Tanh.cs
@@ -37,6 +37,15 @@ public static partial class torch
{
public static partial class nn
{
/// <summary>
/// Tanh activation
/// </summary>
/// <returns></returns>
public static Tanh Tanh()
{
return new Tanh(false);
}

/// <summary>
/// Tanh activation
/// </summary>
15 changes: 15 additions & 0 deletions src/TorchSharp/NN/Activation/Tanhshrink.cs
@@ -32,6 +32,14 @@ public static partial class torch
{
public static partial class nn
{
/// <summary>
/// Tanhshrink
/// </summary>
public static Tanhshrink Tanhshrink()
{
return new Tanhshrink(false);
}

/// <summary>
/// Tanhshrink
/// </summary>
@@ -53,6 +61,13 @@ public static Tensor tanhshrink(Tensor x, bool inplace = false)
using var tanh_x = x.tanh();
return inplace ? x.sub_(tanh_x).alias() : x.sub(tanh_x);
}

/// <summary>
/// Tanhshrink
/// </summary>
/// <param name="x">The input tensor</param>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Tanhshrink(Tensor x) => tanhshrink(x, false);
}
}
}
10 changes: 9 additions & 1 deletion src/TorchSharp/NN/Activation/Threshold.cs
@@ -59,11 +59,19 @@ public static partial class functional
/// <param name="threshold">The value to threshold at</param>
/// <param name="value">The value to replace with</param>
/// <param name="inplace">Do the operation in-place</param>
/// <returns></returns>
public static Tensor threshold(Tensor x, double threshold, double value, bool inplace = false)
{
return inplace ? x.threshold_(threshold, value).alias() : x.threshold(threshold, value);
}

/// <summary>
/// Thresholds each element of the input Tensor.
/// </summary>
/// <param name="x">The input tensor</param>
/// <param name="threshold">The value to threshold at</param>
/// <param name="value">The value to replace with</param>
[Obsolete("Not using the PyTorch naming convention.",false)]
public static Tensor Threshold(Tensor x, double threshold, double value) => nn.functional.threshold(x, threshold, value, false);
}
}
}
9 changes: 9 additions & 0 deletions src/TorchSharp/NN/Convolution/Convolution.cs
@@ -187,6 +187,12 @@ protected internal override nn.Module _to(ScalarType dtype, bool non_blocking) {
return this;
}

// Included to avoid API compat issues.
[Obsolete("Deprecated API", true)]
protected Convolution(IntPtr handle, IntPtr boxedHandle, long input_channels) : base(handle, boxedHandle) {
throw new NotImplementedException("Deprecated API.");
}

[ComponentName(Name = BiasComponentName)]
protected Parameter? _bias;
[ComponentName(Name = WeightComponentName)]
@@ -198,6 +204,9 @@ protected internal override nn.Module _to(ScalarType dtype, bool non_blocking) {
// reverse order than the dimension.
protected long[] _reversed_padding_repeated_twice;

[Obsolete("Deprecated.", true)]
protected long input_channels;

public long in_channels { get; }
public long out_channels { get; }
public bool transposed { get; }
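This file uses the second flavor of [Obsolete]: with the error flag set to true, the protected constructor and the input_channels field cannot be referenced in new code, but they remain in the assembly metadata, which is presumably what satisfies the API Compat baseline comparison. A hedged illustration of the two flavors used in this commit (the type and members below are placeholders, not TorchSharp APIs):

using System;

public static class LegacySurfaceSketch
{
    // error: false -> call sites get warning CS0618 but still compile (the activation shims above).
    [Obsolete("Use the lower-case name instead.", false)]
    public static int OldName() => 42;

    // error: true -> any new use is error CS0619, yet the member is still present in metadata.
    [Obsolete("Deprecated API", true)]
    public static int RemovedName() => throw new NotImplementedException();
}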
14 changes: 13 additions & 1 deletion src/TorchSharp/NN/FeatureDropout.cs
@@ -42,11 +42,23 @@ public static partial class nn
/// </summary>
/// <param name="p">Dropout probability of a channel to be zeroed. Default: 0.5</param>
/// <param name="inplace">If set to true, will do this operation in-place. Default: false</param>
public static FeatureAlphaDropout FeatureAlphaDropout(double p = 0.5, bool inplace = false)
public static FeatureAlphaDropout FeatureAlphaDropout(double p, bool inplace)
{
return new FeatureAlphaDropout(p, inplace);
}

/// <summary>
/// Randomly masks out entire channels (a channel is a feature map, e.g. the j-th channel of the i-th sample in the batch input is a tensor input[i,j]) of the input tensor.
/// Instead of setting activations to zero, as in regular Dropout, the activations are set to the negative saturation value of the SELU activation function.
/// Each element will be masked independently on every forward call with probability p using samples from a Bernoulli distribution. The elements to be masked are
/// randomized on every forward call, and scaled and shifted to maintain zero mean and unit variance.
/// </summary>
/// <param name="p">Dropout probability of a channel to be zeroed. Default: 0.5</param>
public static FeatureAlphaDropout FeatureAlphaDropout(double p = 0.5)
{
return new FeatureAlphaDropout(p, false);
}

public static partial class functional
{
/// <summary>
3 changes: 3 additions & 0 deletions src/TorchSharp/NN/Normalization/NormBase.cs
@@ -52,6 +52,9 @@ private void ResetRunningStats()
}
}

// For backward compat.
public void reset_running_stats() => ResetRunningStats();

public void reset_parameters() {
ResetRunningStats();
if (affine) {
2 changes: 1 addition & 1 deletion src/TorchSharp/Tensor/Tensor.cs
@@ -2724,7 +2724,7 @@ public Tensor softmax(long dim, ScalarType? dtype = null) =>
torch.special.softmax(this, dim, dtype);


public Tensor softplus(int beta = 1, int threshold = 20) =>
public Tensor softplus(double beta = 1, double threshold = 20) =>
softplus1(beta, threshold);

private Tensor softplus1(Scalar beta, Scalar threshold)
