# reference : https://github.com/rogertrullo/pytorch_convlstm/
import torch
import torch.nn as nn

def weights_init(m):
    # DCGAN-style initialization: normal weights for conv layers,
    # unit-mean normal weights and zero bias for batch-norm layers.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
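
# Typical usage (illustrative): apply the initializer recursively with
# `model.apply(weights_init)` after constructing a model; nn.Module.apply
# calls the function on every submodule.
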
class Custom_CLSTM_cell(nn.Module):
    """A convolutional LSTM cell with a built-in encoder and decoder.

    The input frame is downsampled by a small convolutional encoder, the
    ConvLSTM gates are computed on the encoded features together with the
    previous hidden state, and the new hidden state is upsampled back to
    image space by a transposed-convolution decoder. All layer sizes are
    hardcoded, so __init__ takes no arguments.
    """
    def __init__(self):
        super(Custom_CLSTM_cell, self).__init__()
        # Encoder: 3-channel input -> 4 feature channels at 1/4 spatial resolution.
        self.encoding = nn.Sequential(
            nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1),
            nn.ELU(inplace=True),
            nn.Conv2d(8, 8, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.Conv2d(8, 8, kernel_size=3, stride=2, padding=1),
            nn.ELU(inplace=True),
            nn.Conv2d(8, 4, kernel_size=1, stride=1),
            nn.ELU(inplace=True),
        )
        # Gate convolution: takes the concatenation of the encoded input and the
        # previous hidden state (4 + 4 = 8 channels) and produces the four gate
        # pre-activations (4 * 4 = 16 channels).
        self.CLSTM = nn.Conv2d(8, 4 * 4, kernel_size=3, stride=1, padding=1)
        # Decoder: upsample the new hidden state back to a 3-channel image.
        self.decoding = nn.Sequential(
            nn.ConvTranspose2d(4, 8, kernel_size=1, stride=1),
            nn.ELU(inplace=True),
            nn.ConvTranspose2d(8, 8, kernel_size=2, stride=2),
            nn.ELU(inplace=True),
            nn.ConvTranspose2d(8, 3, kernel_size=2, stride=2),
            # nn.ELU(inplace=True),
            # nn.ConvTranspose2d(8, 3, kernel_size=2, stride=2),
        )
    def forward(self, input, hidden_state):
        hidden, c = hidden_state  # hidden and c are feature maps with several channels
        encoding = self.encoding(input)
        combined = torch.cat((encoding, hidden), 1)  # concatenate along the channel dim
        B = self.CLSTM(combined)
        (ai, af, ao, ag) = torch.split(B, 4, dim=1)  # four gate pre-activations, 4 channels each
        i = torch.sigmoid(ai)
        f = torch.sigmoid(af + 1.0)  # forget bias = 1
        o = torch.sigmoid(ao)
        g = torch.tanh(ag)
        next_c = f * c + i * g
        next_h = o * torch.tanh(next_c)
        out = self.decoding(next_h)
        return out, (next_h, next_c)
    def init_hidden(self, batch_size):
        # The 8x8 spatial size matches the encoder output for 32x32 inputs.
        return (torch.zeros(batch_size, 4, 8, 8).cuda(),
                torch.zeros(batch_size, 4, 8, 8).cuda())
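
# Shape walk-through for a single cell step (assuming 32x32 RGB inputs, which is
# what the hardcoded 8x8 hidden state implies):
#   input      (B, 3, 32, 32)
#   encoding   (B, 4, 8, 8)     two stride-2 convs downsample 32 -> 8
#   combined   (B, 8, 8, 8)     encoding concatenated with hidden
#   gates      (B, 16, 8, 8)    split into i, f, o, g of 4 channels each
#   next_h/c   (B, 4, 8, 8)
#   out        (B, 3, 32, 32)   decoder upsamples 8 -> 32
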
class CLSTM(nn.Module):
    """A stacked convolutional LSTM built from Custom_CLSTM_cell.

    Args:
        shape: int tuple, height and width of the hidden states h and c
        input_chans: int, number of channels of the input frames
        filter_size: int, height and width of the gate convolution filters
        num_features: int, number of channels of the states (like hidden_size)
        num_layers: int, number of stacked cells

    Note: Custom_CLSTM_cell hardcodes its own sizes, so these arguments are
    stored but not passed to the cells.
    """
    def __init__(self, shape, input_chans, filter_size, num_features, num_layers):
        super(CLSTM, self).__init__()
        self.shape = shape  # H, W
        self.input_chans = input_chans
        self.filter_size = filter_size
        self.num_features = num_features
        self.num_layers = num_layers
        cell_list = []
        # cell_list.append(Custom_CLSTM_cell(self.shape, self.input_chans, self.filter_size, self.num_features).cuda())
        # the first cell would have a different number of input channels
        for idcell in range(self.num_layers):
            cell_list.append(Custom_CLSTM_cell().cuda())
        self.cell_list = nn.ModuleList(cell_list)
    def forward(self, input, hidden_state):
        """
        Args:
            hidden_state: list of tuples, one per layer, each tuple is (hidden_layer_i, c_layer_i)
            input: tensor of shape (Batch, seq_len, Chans, H, W)
        """
        current_input = input.transpose(0, 1)  # now seq_len, B, C, H, W
        # current_input = input
        next_hidden = []  # hidden states (h and c)
        seq_len = current_input.size(0)
        # for idlayer in range(self.num_layers):  # loop over every layer
        #     hidden_c = hidden_state[idlayer]  # hidden and c are feature maps with several channels
        #     all_output = []
        #     output_inner = []
        #     for t in range(seq_len):  # loop over every step
        #         output, hidden_c = self.cell_list[idlayer](current_input[t, ...], hidden_c)
        #         output_inner.append(hidden_c[0])
        #
        #     next_hidden.append(hidden_c)
        #     current_input = torch.cat(output_inner, 0).view(current_input.size(0), *output_inner[0].size())  # seq_len, B, chans, H, W
        # return next_hidden, current_input
        hidden_c = hidden_state[0]
        out = []
        for t in range(seq_len):  # only the first cell is used here; see the commented multi-layer loop above
            output, hidden_c = self.cell_list[0](current_input[t, ...], hidden_c)
            out.append(output)
        next_hidden.append(hidden_c)
        out = torch.cat(out, 0).view(current_input.size(0), *out[0].size()).transpose(0, 1)  # back to B, seq_len, C, H, W
        return out, next_hidden
    def init_hidden(self, batch_size):
        init_states = []  # a list of (h, c) tuples, one per layer
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size))
        return init_states
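

# Minimal smoke test (illustrative only): the shapes below assume 32x32 RGB
# frames and a CUDA device, since Custom_CLSTM_cell hardcodes its sizes and
# its .cuda() calls. The constructor arguments passed to CLSTM are placeholders
# that simply match the hardcoded cell configuration.
if __name__ == "__main__":
    batch_size, seq_len = 2, 5
    model = CLSTM(shape=(8, 8), input_chans=3, filter_size=3,
                  num_features=4, num_layers=1).cuda()
    model.apply(weights_init)
    hidden = model.init_hidden(batch_size)              # list of (h, c) tuples
    frames = torch.zeros(batch_size, seq_len, 3, 32, 32).cuda()
    out, next_hidden = model(frames, hidden)
    print(out.shape)  # expected: torch.Size([2, 5, 3, 32, 32])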