modify conflict files
DaeyeolKim committed Aug 31, 2021
2 parents 7027036 + 52568e4 commit c907924
Showing 6 changed files with 238 additions and 13 deletions.
main.py (11 changes: 9 additions & 2 deletions)
@@ -1,6 +1,8 @@
import copy
import datetime
import json
import time
import os

import numpy as np
import torch
@@ -101,6 +103,7 @@
# model = DataParallelModel(model, device_ids=[0, 1, 2])
# else:
# model = DataParallel(model, output_device=0)
torch.cuda.set_device(int(options["set_gpu_device"]))
model.cuda()
else:
model = model.to('cpu')
@@ -140,6 +143,7 @@
Model Training Step
'''
min_val_loss = 100.0
min_val_loss_model = None

for epoch in range(hyper_params["epochs"]):
if __TIME__ and epoch == 0:
@@ -198,11 +202,14 @@
torch.save(checkpoint, params["checkpoint_path"] + model_params["name"] + "/"
+ params["dataset_name"] + "_" + str(epoch) + "_"
+ str(min_val_loss) + '.pth')
min_val_loss_model = copy.deepcopy(model)

if epoch + 1 == hyper_params["epochs"] or epoch % 10 == 0:
if __TIME__ and epoch == 0:
start_time = time.time()
with tqdm(test_loader, desc="test ", total=len(test_loader)/100) as tepoch:
if epoch + 1 == hyper_params["epochs"]:
model = min_val_loss_model
with tqdm(test_loader, desc="test ", total=len(test_loader)) as tepoch:
model.eval()
inference_array = []
target_array = []
@@ -239,4 +246,4 @@
if __TIME__ and epoch == 0:
log_info_time("inference time \t: ", datetime.timedelta(seconds=save_time - start_time))

plot_graph(0, 300, target_array, inference_array)
plot_graph(0, 300, target_array, inference_array)
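
The hunks above add best-model tracking: the weights with the lowest validation loss are deep-copied into min_val_loss_model and swapped back in before the final test loop. A minimal runnable sketch of the pattern, with a stand-in linear model and made-up loss values (both are assumptions of this sketch, not the repo's code):

import copy
import torch

model = torch.nn.Linear(4, 1)                  # stand-in for the real network
fake_val_losses = [0.9, 0.4, 0.6, 0.3, 0.5]    # pretend per-epoch validation losses

min_val_loss = float("inf")
min_val_loss_model = None
for epoch, val_loss in enumerate(fake_val_losses):
    # ... training and validation would run here ...
    if val_loss < min_val_loss:
        min_val_loss = val_loss
        # deepcopy keeps an independent snapshot that later epochs cannot overwrite
        min_val_loss_model = copy.deepcopy(model)

# evaluate the best snapshot rather than the last-epoch weights
if min_val_loss_model is not None:
    model = min_val_loss_model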
models.py (1 change: 0 additions & 1 deletion)
@@ -52,7 +52,6 @@ def summary(model, model_name):
if model_name == "DeepPhys" or model_name == "DeepPhys_DA":
torchsummary.summary(model, (2, 3, 36, 36))
elif model_name == "PhysNet" or model_name == "PhysNet_LSTM":
# torchsummary.summary(model, (3, 32, 128, 128))
torchinfo.summary(model, (1, 3, 32, 128, 128))
elif model_name == "PPNet":
torchinfo.summary(model, (1, 1, 250))
nets/blocks/cnn_blocks.py (40 changes: 37 additions & 3 deletions)
@@ -1,7 +1,7 @@
import torch.nn

from nets.blocks.blocks import ConvBlock2D

from nets.blocks.blocks import ConvBlock3D

class cnn_blocks(torch.nn.Module):
def __init__(self):
@@ -25,7 +25,41 @@ def __init__(self):

def forward(self, x):
[batch, channel, length, width, height] = x.shape
x = x.reshape(batch * length, channel, width, height)
x = x.view(batch * length, channel, width, height)
x = self.cnn_blocks(x)
return x.reshape(batch, length, -1)
x = x.view(batch, length, -1, 1, 1)

return x

'''
Conv3D 1x3x3 (paper architecture)
'''
# class cnn_blocks(torch.nn.Module):
# def __init__(self):
# super(cnn_blocks, self).__init__()
# self.cnn_blocks = torch.nn.Sequential(
# ConvBlock3D(3, 16, [1, 5, 5], [1, 1, 1], [0, 2, 2]),
# torch.nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2)),
# ConvBlock3D(16, 32, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# ConvBlock3D(32, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# torch.nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2)),
# ConvBlock3D(64, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# ConvBlock3D(64, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# torch.nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2)),
# ConvBlock3D(64, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# ConvBlock3D(64, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# torch.nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2)),
# ConvBlock3D(64, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# ConvBlock3D(64, 64, [1, 3, 3], [1, 1, 1], [1, 1, 1]),
# # torch.nn.AdaptiveMaxPool3d(1)
# )
#
# def forward(self, x):
# [batch, channel, length, width, height] = x.shape
# # x = x.reshape(batch * length, channel, width, height)
# # x = self.cnn_blocks(x)
# # x = x.reshape(batch,length,-1,1,1)
# x = self.cnn_blocks(x)
#
# return x
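
The rewritten forward above folds the time axis into the batch axis so the 2D convolution stack processes every frame in one call, then unfolds the result into a per-frame feature sequence. One caveat: view() only reinterprets memory, so the fold keeps frames intact only if the length axis sits next to batch, which takes a permute when the input arrives as (batch, channel, length, width, height). A self-contained sketch of the trick, with a single Conv2d plus pooling standing in for the real block stack:

import torch

x = torch.randn(2, 3, 8, 36, 36)    # (batch, channel, length, width, height)
batch, channel, length, width, height = x.shape

# move length next to batch, then fold into (batch*length, channel, width, height)
frames = x.permute(0, 2, 1, 3, 4).contiguous().view(batch * length, channel, width, height)

conv = torch.nn.Conv2d(channel, 64, kernel_size=3, padding=1)   # stand-in for cnn_blocks
feat = torch.nn.AdaptiveAvgPool2d(1)(conv(frames))              # (batch*length, 64, 1, 1)

# unfold back into a sequence, matching the (batch, length, -1, 1, 1) layout above
feat = feat.view(batch, length, -1, 1, 1)
print(feat.shape)                    # torch.Size([2, 8, 64, 1, 1])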

nets/models/PhysNet.py (11 changes: 6 additions & 5 deletions)
@@ -3,6 +3,7 @@
from nets.blocks.decoder_blocks import decoder_block
from nets.blocks.encoder_blocks import encoder_block
from nets.blocks.cnn_blocks import cnn_blocks
from nets.modules.modules import ConvLSTM


class PhysNet(torch.nn.Module):
@@ -24,14 +25,14 @@ def __init__(self, frame=32):
super(PhysNet_2DCNN_LSTM, self).__init__()
self.physnet_lstm = torch.nn.ModuleDict({
'cnn_blocks' : cnn_blocks(),
'lstm' : torch.nn.LSTM(input_size=64, hidden_size=64, num_layers=2, batch_first=True),
'cnn_flatten' : torch.nn.Conv1d(64, 1, 1, stride=1, padding=0)
'cov_lstm' : ConvLSTM(64, [1, 1, 64], (1, 1), num_layers=3, batch_first=True, bias=True, return_all_layers=False),
'cnn_flatten' : torch.nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
})

def forward(self, x):
[batch, channel, length, width, height] = x.shape
x = self.physnet_lstm['cnn_blocks'](x)
x,(_,_) = self.physnet_lstm['lstm'](x)
x = x.reshape(batch, -1, length)
x,_ = self.physnet_lstm['cov_lstm'](x)
x = torch.permute(x[0], (0, 2, 1, 3, 4))
x = self.physnet_lstm['cnn_flatten'](x)
return x.reshape(-1, length)
return x.view(-1, length)
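
For orientation, the tensor shapes through the rewired forward (a sketch; it assumes frame=32 and that cnn_blocks collapses each frame to a 64-channel 1x1 map, per its new view above):

# x                         : (B, 3, 32, H, W)   input clip
# cnn_blocks(x)             : (B, 32, 64, 1, 1)  per-frame features, batch-first for the ConvLSTM
# cov_lstm(x)[0]            : (B, 32, 64, 1, 1)  last layer only (return_all_layers=False)
# permute(0, 2, 1, 3, 4)    : (B, 64, 32, 1, 1)  channels first for Conv3d
# cnn_flatten(x)            : (B, 1, 32, 1, 1)   64 -> 1 channel via the 1x1x1 kernel
# view(-1, length)          : (B, 32)            one output value per frame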
nets/modules/modules.py (183 changes: 183 additions & 0 deletions)
@@ -174,3 +174,186 @@ def forward(self, x):
out += residual
out = self.relu(out)
return out

class ConvLSTMCell(torch.nn.Module):

def __init__(self, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""

super(ConvLSTMCell, self).__init__()

self.input_dim = input_dim
self.hidden_dim = hidden_dim

self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias

self.conv = torch.nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)

def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state

combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis

combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)

c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)

return h_next, c_next
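
# The update above is the standard ConvLSTM cell: self.conv computes all four
# gate pre-activations in a single convolution and torch.split separates them.
# With * denoting convolution, ⊙ the elementwise product, and [x_t, h_cur] the
# concatenation along the channel axis:
#   i = sigmoid(W_i * [x_t, h_cur] + b_i)    input gate
#   f = sigmoid(W_f * [x_t, h_cur] + b_f)    forget gate
#   o = sigmoid(W_o * [x_t, h_cur] + b_o)    output gate
#   g = tanh(W_g * [x_t, h_cur] + b_g)       candidate state
#   c_next = f ⊙ c_cur + i ⊙ g
#   h_next = o ⊙ tanh(c_next)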

def init_hidden(self, batch_size, image_size):
height, width = image_size
return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))


class ConvLSTM(torch.nn.Module):

"""
Parameters:
input_dim: Number of channels in input
hidden_dim: Number of hidden channels
kernel_size: Size of kernel in convolutions
num_layers: Number of LSTM layers stacked on each other
batch_first: Whether dimension 0 is the batch dimension
bias: Whether the convolutions use a bias term
return_all_layers: Return the list of computations for all layers
Note: Will do same padding.
Input:
A tensor of size B, T, C, H, W or T, B, C, H, W
Output:
A tuple of two lists of length num_layers (or length 1 if return_all_layers is False).
0 - layer_output_list is the list of lists of length T of each output
1 - last_state_list is the list of last states
each element of the list is a tuple (h, c) for hidden state and memory
Example:
>>> x = torch.rand((32, 10, 64, 128, 128))
>>> convlstm = ConvLSTM(64, 16, (3, 3), 1, True, True, False)
>>> _, last_states = convlstm(x)
>>> h = last_states[0][0]  # 0 for layer index, 0 for h index
"""

def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
batch_first=False, bias=True, return_all_layers=False):
super(ConvLSTM, self).__init__()

self._check_kernel_size_consistency(kernel_size)

# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')

self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers

cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]

cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias))

self.cell_list = torch.nn.ModuleList(cell_list)

def forward(self, input_tensor, hidden_state=None):
"""
Parameters
----------
input_tensor:
5-D tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
hidden_state:
None. Stateful operation is not yet implemented.
Returns
-------
last_state_list, layer_output
"""
if not self.batch_first:
# (t, b, c, h, w) -> (b, t, c, h, w)
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

b, _, _, h, w = input_tensor.size()

# TODO: implement stateful ConvLSTM
if hidden_state is not None:
raise NotImplementedError()
else:
# since init happens inside forward(), the image size can be passed here
hidden_state = self._init_hidden(batch_size=b,
image_size=(h, w))

layer_output_list = []
last_state_list = []

seq_len = input_tensor.size(1)
cur_layer_input = input_tensor

for layer_idx in range(self.num_layers):

h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)

layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output

layer_output_list.append(layer_output)
last_state_list.append([h, c])

if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]

return layer_output_list, last_state_list

def _init_hidden(self, batch_size, image_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
return init_states

@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')

@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
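
A quick smoke test of ConvLSTM as PhysNet_2DCNN_LSTM instantiates it; the input shape is an assumption chosen to mirror what cnn_blocks emits:

import torch

x = torch.randn(2, 32, 64, 1, 1)    # (batch, time, channels, height, width), batch_first
convlstm = ConvLSTM(input_dim=64, hidden_dim=[1, 1, 64], kernel_size=(1, 1),
                    num_layers=3, batch_first=True, bias=True, return_all_layers=False)

layer_outputs, last_states = convlstm(x)
print(layer_outputs[0].shape)    # torch.Size([2, 32, 64, 1, 1]), last layer only
h, c = last_states[0]            # final hidden and cell state of the last layer
print(h.shape, c.shape)          # torch.Size([2, 64, 1, 1]) each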
params.json (5 changes: 3 additions & 2 deletions)
@@ -4,7 +4,8 @@
"__MODEL_SUMMARY__" : 0,
"options":{
"parallel_criterion" : 1,
"parallel_criterion_comment" : "TODO need to verification"
"parallel_criterion_comment" : "TODO need to verification",
"set_gpu_device" : "4"
},
"params":
{
@@ -28,7 +29,7 @@
"PPNET : 100"
],
"train_shuffle" : 0,
"test_batch_size" : 8,
"test_batch_size" : 32,
"test_shuffle" : 0
},
"hyper_params":
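The new "set_gpu_device" option is what the main.py hunk above passes to torch.cuda.set_device; the value is stored as a JSON string, hence the int() cast there. A guarded sketch of the lookup (the availability check is an addition of this sketch, not in main.py):

import json
import torch

with open("params.json") as f:
    options = json.load(f)["options"]

if torch.cuda.is_available():
    # the option is a string in the JSON, so cast it before selecting the device
    torch.cuda.set_device(int(options["set_gpu_device"]))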
