
Commit 7d3d63a

Merge pull request #38 from Tensor46/develop
Develop
2 parents b818ed9 + 174e53e, commit 7d3d63a

File tree: 4 files changed (+49, -15 lines)

core/NeuralLayers/convolution.py

+3 -0

@@ -163,6 +163,9 @@ def __init__(self,
         if maintain_out_size:
             out_pad = (tensor_size[2]*strides[0] - self.tensor_size[2],
                        tensor_size[3]*strides[1] - self.tensor_size[3])
+            self.tensor_size = (tensor_size[0], out_channels,
+                                math.floor(h)+out_pad[0],
+                                math.floor(w)+out_pad[1])
         self.Convolution = \
             nn.ConvTranspose2d(tensor_size[1]//pre_expansion,
                                out_channels*pst_expansion, filter_size,
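The hunk above fixes the reported output size when maintain_out_size is set: out_pad is the shortfall between the desired spatial size (input size x stride) and what the transposed convolution naturally produces, and self.tensor_size now accounts for it. A minimal standalone sketch in plain PyTorch (not this repo's Convolution wrapper) of the same size bookkeeping:

import torch
import torch.nn as nn

# ConvTranspose2d output height is (h_in - 1)*stride - 2*padding + kernel_size
# + output_padding, which falls short of h_in*stride without output_padding.
x = torch.rand(1, 3, 10, 10)
up = nn.ConvTranspose2d(3, 8, kernel_size=3, stride=2, padding=1,
                        output_padding=1)   # (10-1)*2 - 2 + 3 + 1 = 20
print(up(x).shape)                          # torch.Size([1, 8, 20, 20])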

core/NeuralLayers/linear.py

+40 -9

@@ -22,6 +22,7 @@ class Linear(nn.Module):
                      default = None
         dropout (float): 0. - 1., default = 0.
         bias (bool): to enable bias, default = True
+        out_shape (tuple): a desired output shape as a tuple, without batches

     Return:
         torch.Tensor of shape (B, out_features)
@@ -33,30 +34,45 @@ def __init__(self,
                  activation: str = None,
                  dropout: float = 0.,
                  bias: bool = True,
+                 out_shape: tuple = None,
                  **kwargs):
         super(Linear, self).__init__()
+        self.t_size = tuple(tensor_size)
         # Checks
-        assert type(tensor_size) in [int, list, tuple], \
-            "Linear: tensor_size must be tuple/list"
+        if not type(tensor_size) in [int, list, tuple]:
+            raise TypeError("Linear: tensor_size must be tuple/list")

         if isinstance(tensor_size, int):
             in_features = tensor_size
         else:
             assert len(tensor_size) >= 2, \
                 "Linear: when tuple/list, tensor_size must be of length 2 or more"
             in_features = np.prod(tensor_size[1:])
-        assert isinstance(out_features, int), "Linear: out_features must be int"
-        assert isinstance(dropout, float), "Linear: dropout must be float"
-        if dropout > 0.:
+
+        if not isinstance(out_features, int):
+            raise TypeError("Linear: out_features must be int")
+
+        if not isinstance(dropout, float):
+            raise TypeError("Linear: dropout must be float")
+        if 1. > dropout > 0.:
             self.dropout = nn.Dropout2d(dropout)
+
         if isinstance(activation, str):
             activation = activation.lower()
         assert activation in [None, "", ] + Activations.available(), \
             "Linear: activation must be None/''/" + \
             "/".join(Activations.available())
-        assert isinstance(bias, bool), "Linear: bias must be bool"
-        multiplier = 2 if activation in ("maxo", "rmxo") else 1
+        self.act = activation
+
+        if not isinstance(bias, bool):
+            raise TypeError("Linear: bias must be bool")

+        if out_shape is not None:
+            assert np.prod(out_shape) == out_features, \
+                "Linear: np.prod(out_shape) != out_features"
+            self.out_shape = out_shape
+
+        multiplier = 2 if activation in ("maxo", "rmxo") else 1
         # get weight and bias
         self.weight = nn.Parameter(torch.rand(out_features*multiplier,
                                               in_features))
@@ -66,9 +82,12 @@ def __init__(self,
             self.bias = nn.Parameter(torch.zeros(out_features*multiplier))
         # get activation function
         if activation is not None:
-            self.activation = Activations(activation)
+            if activation in Activations.available():
+                self.activation = Activations(activation)
         # out tensor size
         self.tensor_size = (1, out_features)
+        if hasattr(self, "out_shape"):
+            self.tensor_size = tuple([1, ] + list(out_shape))

     def forward(self, tensor):
         if tensor.dim() > 2:
@@ -80,13 +99,25 @@ def forward(self, tensor):
             tensor = tensor + self.bias.view(1, -1)
         if hasattr(self, "activation"):
             tensor = self.activation(tensor)
+        if hasattr(self, "out_shape"):
+            tensor = tensor.view(-1, *self.out_shape)
         return tensor

+    def __repr__(self):
+        msg = "x".join(["_"] + [str(x) for x in self.t_size[1:]]) + " -> "
+        if hasattr(self, "dropout"):
+            msg += "dropout -> "
+        msg += "{}({})".format("linear", "x".join([str(x) for x in
+                               self.weight.shape])) + " -> "
+        if hasattr(self, "activation"):
+            msg += self.act + " -> "
+        msg += "x".join(["_"] + [str(x) for x in self.tensor_size[1:]])
+        return msg

 # from core.NeuralLayers import Activations
 # tensor_size = (2, 3, 10, 10)
 # x = torch.rand(*tensor_size)
-# test = Linear(tensor_size, 16, "maxo", 0., bias=True)
+# test = Linear(tensor_size, 16, "", 0., True, (1, 4, 4))
 # test(x).size()
 # test.weight.shape
 # test.bias.shape
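Net effect of this file's changes: Linear gains an out_shape argument that reshapes the flat (B, out_features) output, with np.prod(out_shape) required to equal out_features, plus a readable __repr__. A usage sketch mirroring the commented test above (the import path is assumed from the repo layout):

import torch
from core.NeuralLayers import Linear  # import path assumed

x = torch.rand(2, 3, 10, 10)
# 16 output features, reshaped per sample to (1, 4, 4): np.prod((1, 4, 4)) == 16
fc = Linear((2, 3, 10, 10), 16, "", 0., True, out_shape=(1, 4, 4))
print(fc(x).shape)  # torch.Size([2, 1, 4, 4])
print(repr(fc))     # e.g. _x3x10x10 -> linear(16x300) -> _x1x4x4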

core/NeuralLayers/obfuscatedecolor.py

+3 -3

@@ -75,9 +75,9 @@ def forward(self, tensor):
                 tensor[i, ] = tmp.expand(-1, 3, -1, -1)
                 self.n_decolor += 1

-        random_shit = torch.rand(1, *tensor.size()[1:])
+        random_shift = torch.rand(1, *tensor.size()[1:])
         if tensor.is_cuda:
-            random_shit = random_shit.cuda()
+            random_shift = random_shift.cuda()

         for i in range(tensor.size(0)):
             if self.n_obfuscate == self.p_obfuscate.size(0):
@@ -91,7 +91,7 @@ def forward(self, tensor):

             sh, eh = self.sample_height[self.n_sample_height]
             sw, ew = self.sample_width[self.n_sample_width]
-            tensor[i, :, sh:eh, sw:ew] = random_shit[0, :, sh:eh, sw:ew]
+            tensor[i, :, sh:eh, sw:ew] = random_shift[0, :, sh:eh, sw:ew]

             self.n_sample_height += 1
             self.n_sample_width += 1
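Beyond the random_shit -> random_shift rename, the surrounding logic is a cutout-style obfuscation: a rectangle of each image is overwritten with values from a shared noise image. A minimal standalone sketch of that idea (shapes and patch corners are made up):

import torch

tensor = torch.rand(4, 3, 32, 32)                 # a batch of images
random_shift = torch.rand(1, *tensor.size()[1:])  # one noise image
sh, eh, sw, ew = 8, 24, 8, 24                     # assumed patch corners
for i in range(tensor.size(0)):
    tensor[i, :, sh:eh, sw:ew] = random_shift[0, :, sh:eh, sw:ew]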

core/NeuralLayers/sae.py

+3 -3

@@ -14,17 +14,17 @@ class ConvolutionalSAE(nn.Module):
     def __init__(self, tensor_size, filter_size, out_channels, strides=(1, 1),
                  pad=True, activation="relu", dropout=0., normalization=None,
                  pre_nm=False, groups=1, weight_norm=False, equalized=False,
-                 shit=False, learningRate=0.1, *args, **kwargs):
+                 shift=False, learningRate=0.1, *args, **kwargs):

         super(ConvolutionalSAE, self).__init__()
         self.encoder = Convolution(tensor_size, filter_size, out_channels,
                                    strides, pad, activation, dropout,
                                    normalization, pre_nm, groups, weight_norm,
-                                   equalized, shit, **kwargs)
+                                   equalized, shift, **kwargs)
         self.decoder = Convolution(self.encoder.tensor_size, filter_size,
                                    out_channels, strides, pad, activation,
                                    dropout, normalization, pre_nm, groups,
-                                   weight_norm, equalized, shit,
+                                   weight_norm, equalized, shift,
                                    transpose=True, **kwargs)
         self.decoder.tensor_size = tensor_size
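After the shit -> shift rename, the keyword forwarded to both the encoder and decoder Convolution blocks matches the rest of the codebase. A hypothetical instantiation (module path and argument values assumed, matching the signature above):

import torch
from core.NeuralLayers import ConvolutionalSAE  # import path assumed

sae = ConvolutionalSAE((1, 3, 32, 32), (3, 3), 16, strides=(2, 2),
                       shift=False)  # the renamed keyword
x = torch.rand(1, 3, 32, 32)
# encoder downsamples; the decoder (transpose=True) maps back to input size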
