
Commit

modify attentionblock
DaeyeolKim committed Jul 30, 2021
1 parent 2e25c61 commit 791a411
Showing 5 changed files with 30 additions and 10 deletions.
8 changes: 6 additions & 2 deletions main.py
@@ -198,10 +198,14 @@
             else:
                 inference_array.extend(outputs.cpu().numpy())
                 target_array.extend(target.cpu().numpy())
+                if tepoch.n == 0 and __TIME__:
+                    save_time = time.time()
+
     if model_params["name"] == "DeepPhys":
         inference_array = scipy.signal.detrend(np.cumsum(inference_array))
         target_array = scipy.signal.detrend(np.cumsum(target_array))
 
+    if __TIME__ and epoch == 0:
+        log_info_time("inference time \t: ", datetime.timedelta(seconds=save_time - start_time))
+
     plot_graph(0, 300, target_array, inference_array)
-    if __TIME__ and epoch == 0:
-        log_info_time("inference time \t: ", datetime.timedelta(seconds=time.time() - start_time))
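
For the DeepPhys branch, outputs and targets are derivative-style pulse signals, so np.cumsum reintegrates them and scipy.signal.detrend removes the drift that the integration accumulates before plotting. A minimal standalone sketch of that post-processing on synthetic data (the frame rate and waveform are illustrative assumptions, not values from the repository):

import numpy as np
import scipy.signal

# Synthetic pulse wave and its first-order differences, standing in for
# DeepPhys targets/outputs (illustrative values only).
fs = 30                                    # assumed frame rate
t = np.arange(0, 10, 1 / fs)
pulse = np.sin(2 * np.pi * 1.2 * t)        # ~72 bpm sinusoid
diffs = np.diff(pulse, prepend=pulse[0])   # derivative-like signal

# Same transform as applied to inference_array / target_array above:
# integrate the derivatives, then strip the accumulated linear drift.
reconstructed = scipy.signal.detrend(np.cumsum(diffs))
print(reconstructed.shape)                 # (300,)
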
18 changes: 17 additions & 1 deletion nets/blocks/attentionBlocks.py
@@ -4,7 +4,7 @@
 from ..modules.modules import DAModule
 
 
-class AttentionBlock(nn.Module):
+class AttentionBlock_DA(nn.Module):
     def __init__(self, in_channels):
         super().__init__()
         self.attention = DAModule(in_channels)
@@ -19,3 +19,19 @@ def forward(self, input):
         mask = torch.div(mask * H * W, norm)
         mask = self.conv1x1(mask)
         return mask
+
+
+
+class AttentionBlock(torch.nn.Module):
+    def __init__(self, in_channels):
+        super().__init__()
+        self.attention = torch.nn.Conv2d(in_channels, 1, kernel_size=1, stride=1, padding=0)
+
+    def forward(self, input):
+        mask = self.attention(input)
+        mask = torch.sigmoid(mask)
+        B, _, H, W = input.shape
+        norm = 2 * torch.norm(mask, p=1, dim=(1, 2, 3))
+        norm = norm.reshape(B, 1, 1, 1)
+        mask = torch.div(mask * H * W, norm)
+        return mask
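
The new AttentionBlock is a plain 1x1-convolution soft-attention head: the sigmoid mask is rescaled by H * W / (2 * ||mask||_1), so every sample's mask sums to H*W/2 regardless of its raw magnitude. A quick standalone check, assuming the repository is on the Python path; the 32 channels and 36x36 feature maps are illustrative assumptions:

import torch
from nets.blocks.attentionBlocks import AttentionBlock  # module path as in this repository

block = AttentionBlock(in_channels=32)
x = torch.randn(4, 32, 36, 36)         # batch of appearance feature maps (illustrative shape)
mask = block(x)

print(mask.shape)                      # torch.Size([4, 1, 36, 36])
print(mask.sum(dim=(1, 2, 3)))         # each entry is ~648.0, i.e. H*W/2 for 36x36 maps
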
2 changes: 1 addition & 1 deletion nets/models/DeepPhys.py
@@ -14,7 +14,7 @@ def __init__(self):
         self.attention_mask1 = None
         self.attention_mask2 = None
 
-        self.appearance_model = AppearanceModel_2D(in_channels=self.in_channels, out_channels=self.out_channels * 4,
+        self.appearance_model = AppearanceModel_2D(in_channels=self.in_channels, out_channels=self.out_channels,
                                                    kernel_size=self.kernel_size)
         self.motion_model = MotionModel(in_channels=self.in_channels, out_channels=self.out_channels,
                                         kernel_size=self.kernel_size)
2 changes: 1 addition & 1 deletion nets/models/sub_models/MotionModel.py
Expand Up @@ -28,7 +28,7 @@ def forward(self, inputs, mask1, mask2):
M1 = torch.tanh(self.m_batch_Normalization1(self.m_conv1(inputs)))
M2 = self.m_batch_Normalization2(self.m_conv2(M1))
# element wise multiplication Mask1
g1 = torch.tanh(torch.mul(1 * mask1, M2))
g1 = torch.tanh(torch.mul(torch.ones(size= mask1.shape).to('cuda')@mask1, M2))
M3 = self.m_dropout1(g1)
# pooling
M4 = self.m_avg1(M3)
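
Here mask1 is the single-channel soft-attention mask handed over from the appearance branch, and it gates the motion features M2 before the tanh. A standalone sketch of the broadcast element-wise gating performed by the replaced 1 * mask1 form; the tensor shapes are illustrative assumptions, not the repository's exact configuration:

import torch

# Illustrative shapes only: 2 samples, 32 motion channels, 36x36 feature maps.
mask1 = torch.rand(2, 1, 36, 36)   # single-channel attention mask from the appearance branch
M2 = torch.randn(2, 32, 36, 36)    # motion-branch feature maps after batch norm

# Broadcast element-wise gating, as in the pre-change `1 * mask1` form:
g1 = torch.tanh(torch.mul(mask1, M2))
print(g1.shape)                    # torch.Size([2, 32, 36, 36])
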
10 changes: 5 additions & 5 deletions params.json
@@ -1,7 +1,7 @@
 {
   "__TIME__" : 1,
   "__PREPROCESSING__" : 0,
-  "__MODEL_SUMMARY__" : 0,
+  "__MODEL_SUMMARY__" : 1,
   "options":{
     "parallel_criterion" : 1,
     "parallel_criterion_comment" : "TODO need to verification"
@@ -18,7 +18,7 @@
     "validation_ratio_comment" : "split train dataset using validation_ratio",
     "train_batch_size" : 32,
     "train_shuffle" : 0,
-    "test_batch_size" : 1,
+    "test_batch_size" : 32,
     "test_shuffle" : 0
   },
   "hyper_params":
@@ -38,16 +38,16 @@
       "adam","sgd","rms_prop","ada_delta","ada_grad","ada_max",
       "ada_mw","a_sgd","lbfgs","n_adam","r_adam","rprop","sparse_adam"
     ],
-    "learning_rate": 1,
+    "learning_rate": 0.001,
     "learning_rate_comment": [
       "DeepPhys : lr = 1",
       "PhysNet : lr = 0.001"
     ],
-    "epochs" : 1
+    "epochs" : 30
   },
   "model_params":
   {
-    "name": "PhysNet",
+    "name": "DeepPhys",
     "name_comment":
     [
       "DeepPhys",
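
A minimal sketch of reading this configuration back from Python; the repository's own loading code may differ, and the nesting of learning_rate and epochs under hyper_params is inferred from the hunks above, while the printed values are the post-commit ones:

import json

with open("params.json") as f:
    params = json.load(f)

model_params = params["model_params"]
hyper_params = params["hyper_params"]

print(params["__MODEL_SUMMARY__"])      # 1 after this commit
print(model_params["name"])             # "DeepPhys"
print(hyper_params["learning_rate"])    # 0.001
print(hyper_params["epochs"])           # 30
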
