Commit

add nets dir
- nets dir consists of [layers, blocks, modules, + model]
SpicyYeol committed Jul 23, 2021
1 parent 2763402 commit 9b02473
Showing 21 changed files with 1,748 additions and 0 deletions.
86 changes: 86 additions & 0 deletions nets/AppearanceModel.py
@@ -0,0 +1,86 @@
import torch
from torch.nn import Module
from .modules.modules import DAModule
from .blocks.AttentionBlocks import AttentionBlock
from .blocks.blocks import EncoderBlock, DecoderBlock


class AppearanceModel_2D(Module):
def __init__(self, in_channels, out_channels, kernel_size):
# Appearance model
super().__init__()
self.a_conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=1, padding=1)
self.a_batch_Normalization1 = torch.nn.BatchNorm2d(out_channels)
self.a_conv2 = torch.nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1,
padding=1)
self.a_batch_Normalization2 = torch.nn.BatchNorm2d(out_channels)
self.a_dropout1 = torch.nn.Dropout2d(p=0.50)
# Attention mask1
self.attention_mask1 = AttentionBlock(out_channels)
self.a_avg1 = torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
self.a_conv3 = torch.nn.Conv2d(in_channels=out_channels, out_channels=out_channels * 2, kernel_size=3, stride=1,
padding=1)
        self.a_batch_Normalization3 = torch.nn.BatchNorm2d(out_channels * 2)
        self.a_conv4 = torch.nn.Conv2d(in_channels=out_channels * 2, out_channels=out_channels * 2, kernel_size=3,
                                       stride=1, padding=1)
        self.a_batch_Normalization4 = torch.nn.BatchNorm2d(out_channels * 2)
self.a_dropout2 = torch.nn.Dropout2d(p=0.50)
# Attention mask2
self.attention_mask2 = AttentionBlock(out_channels * 2)

def forward(self, inputs):
# Convolution layer
A1 = torch.tanh(self.a_batch_Normalization1(self.a_conv1(inputs)))
A2 = torch.tanh(self.a_batch_Normalization2(self.a_conv2(A1)))
A3 = self.a_dropout1(A2)
# Calculate Mask1
M1 = self.attention_mask1(A3)
# Pooling
A4 = self.a_avg1(A3)
# Convolution layer
        A5 = torch.tanh(self.a_batch_Normalization3(self.a_conv3(A4)))
        A6 = torch.tanh(self.a_batch_Normalization4(self.a_conv4(A5)))
A7 = self.a_dropout2(A6)
# Calculate Mask2
M2 = self.attention_mask2(A7)

return M1, M2


class AppearanceModel_DA(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
# in_channels = 3
# out_channels = 32
super(AppearanceModel_DA, self).__init__()
self.encoder_1 = EncoderBlock(in_channel=in_channels, out_channel=out_channels) # conv3,conv3,maxpool2
self.encoder_2 = EncoderBlock(in_channel=out_channels, out_channel=out_channels * 2)
self.encoder_3 = EncoderBlock(in_channel=out_channels * 2, out_channel=out_channels * 4) # conv3,conv3,maxpool2

        self.decoder_1 = DecoderBlock(in_channel=out_channels * 2, out_channel=out_channels, scale_factor=2)
        self.decoder_2 = DecoderBlock(in_channel=out_channels * 4, out_channel=out_channels * 2, scale_factor=2.25)

self.damodul_1 = DAModule(out_channels)
self.damodul_2 = DAModule(out_channels * 2)

self.conv1x1_1 = torch.nn.Conv2d(out_channels // 4, 1, kernel_size=1, stride=1, padding=0)
self.conv1x1_2 = torch.nn.Conv2d(out_channels // 2, 1, kernel_size=1, stride=1, padding=0)

def forward(self, x):
out_en_1 = self.encoder_1(x)
out_en_2 = self.encoder_2(out_en_1)
out_en_3 = self.encoder_3(out_en_2)

out_de_1 = self.decoder_1(out_en_2)
out_de_2 = self.decoder_2(out_en_3)

        # skip connections: element-wise sums, despite the "concat" naming
        out_concat_1 = out_de_1 + out_en_1
        out_concat_2 = out_de_2 + out_en_2

out_da_1 = self.damodul_1(out_concat_1)
M1 = self.conv1x1_1(out_da_1)

out_da_2 = self.damodul_2(out_concat_2)
M2 = self.conv1x1_2(out_da_2)

return M1, M2
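
The `scale_factor=2.25` in `decoder_2` is consistent with a 36x36 input: the encoder stages pool 36 -> 18 -> 9 -> 4 (MaxPool2d floors odd sizes), so `decoder_2` must upsample `out_en_3` from 4 back to 9 (9/4 = 2.25) before the skip sum with `out_en_2`. A minimal shape check for the 2D model is sketched below; it assumes the `nets` package is importable and that `DAModule` exposes `inter_channels` and returns an `inter_channels`-wide feature map (names outside this diff are assumptions).

import torch
from nets.AppearanceModel import AppearanceModel_2D

app = AppearanceModel_2D(in_channels=3, out_channels=32, kernel_size=3)
frames = torch.randn(8, 3, 36, 36)   # batch of 36x36 face crops
mask1, mask2 = app(frames)
print(mask1.shape)   # (8, 1, 36, 36) -- full-resolution attention mask
print(mask2.shape)   # (8, 1, 18, 18) -- half-resolution mask after pooling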
Empty file added nets/Model.py
49 changes: 49 additions & 0 deletions nets/Models.py
@@ -0,0 +1,49 @@
import torch

from .AppearanceModel import AppearanceModel_2D
from .MotionModel import motion_model


class Deepphys(torch.nn.Module):
    def __init__(self, model=None):
        super().__init__()
        # `model` selects the motion-branch variant; taken as a constructor argument
        self.model = model
        self.in_channels = 3
        self.out_channels = 32
        self.kernel_size = 3

        self.appearance_model = AppearanceModel_2D(in_channels=self.in_channels, out_channels=self.out_channels * 4,
                                                   kernel_size=self.kernel_size)

        # mot: c-b-c-b-d-a-c-b-c-b-d-a
        # app: c-b-c-b-d-at-a-c-b-c-b-d-at

        # Alternative appearance branch (not part of this commit):
        # self.appearance_model = DA_appearance_model(in_channels=self.in_channels, out_channels=self.out_channels,
        #                                             kernel_size=self.kernel_size)
        self.motion_model = motion_model(in_channels=self.in_channels, out_channels=self.out_channels,
                                         kernel_size=self.kernel_size, model=model)

        # NOTE: fc is not defined in this commit; the fully connected head is
        # expected to come from elsewhere in the repository.
        self.fully = fc()
        # self.appearance_model = complexAttention(in_channels=self.in_channels, out_channels=self.out_channels,
        #                                          kernel_size=self.kernel_size)
        # self.fully = Compelxfc()

    def forward(self, appearance_input, motion_input):
        """
        Original 2D model.
        :param appearance_input: raw frame for the appearance branch
        :param motion_input: frame-difference input for the motion branch
        :return: output signal plus the two attention masks
        """
attention_mask1, attention_mask2 = self.appearance_model(appearance_input)
motion_output = self.motion_model(motion_input, attention_mask1, attention_mask2)
out = self.fully(motion_output)

# X = torch.Tensor(X).cuda()
# attention_mask1, attention_mask2,out = self.appearance_model(X)
# out = self.fully(out)

return out, attention_mask1, attention_mask2
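
Since `fc` is not defined in this commit, the sketch below wires the two branches directly. It assumes 36x36 inputs; the attention masks are single-channel, so the appearance width (128 in `Deepphys` above, 32 here for brevity) does not have to match the motion width.

import torch
from nets.AppearanceModel import AppearanceModel_2D
from nets.MotionModel import motion_model

app = AppearanceModel_2D(in_channels=3, out_channels=32, kernel_size=3)
mot = motion_model(in_channels=3, out_channels=32, kernel_size=3, model=None)

frame = torch.randn(8, 3, 36, 36)   # raw frame (appearance branch)
diff = torch.randn(8, 3, 36, 36)    # normalized frame difference (motion branch)
mask1, mask2 = app(frame)
feat = mot(diff, mask1, mask2)
print(feat.shape)                    # (8, 64, 9, 9), to be flattened by the fc head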
41 changes: 41 additions & 0 deletions nets/MotionModel.py
@@ -0,0 +1,41 @@
import torch


class motion_model(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, model):
        super().__init__()
        # Motion model; `model` selects a variant and is unused in this version
self.m_conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=1, padding=1)
self.m_batch_Normalization1 = torch.nn.BatchNorm2d(out_channels)
self.m_conv2 = torch.nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=1, padding=1)
self.m_batch_Normalization2 = torch.nn.BatchNorm2d(out_channels)
self.m_dropout1 = torch.nn.Dropout2d(p=0.50)

self.m_avg1 = torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
self.m_conv3 = torch.nn.Conv2d(in_channels=out_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=1,
padding=1)
self.m_batch_Normalization3 = torch.nn.BatchNorm2d(out_channels*2)
self.m_conv4 = torch.nn.Conv2d(in_channels=out_channels*2, out_channels=out_channels*2, kernel_size=kernel_size, stride=1, padding=1)
self.m_batch_Normalization4 = torch.nn.BatchNorm2d(out_channels*2)
self.m_dropout2 = torch.nn.Dropout2d(p=0.50)
self.m_avg2 = torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, inputs, mask1, mask2):
        M1 = torch.tanh(self.m_batch_Normalization1(self.m_conv1(inputs)))
        M2 = self.m_batch_Normalization2(self.m_conv2(M1))
        # gate with attention mask1 (element-wise multiplication)
        g1 = torch.tanh(torch.mul(mask1, M2))
        M3 = self.m_dropout1(g1)
        # pooling
        M4 = self.m_avg1(M3)
        M5 = torch.tanh(self.m_batch_Normalization3(self.m_conv3(M4)))
        M6 = self.m_batch_Normalization4(self.m_conv4(M5))
        # gate with attention mask2 (element-wise multiplication)
        g2 = torch.tanh(torch.mul(mask2, M6))
        M7 = self.m_dropout2(g2)
        M8 = self.m_avg2(M7)
        out = torch.tanh(M8)

        return out
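
The masks coming from the appearance branch are single-channel, so `torch.mul` relies on broadcasting to gate every motion channel with the same spatial weights:

import torch

feat = torch.randn(4, 32, 36, 36)   # motion features
mask = torch.rand(4, 1, 36, 36)     # one-channel attention mask
gated = torch.tanh(mask * feat)     # mask broadcasts across all 32 channels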
Empty file added nets/attentionModel.py
21 changes: 21 additions & 0 deletions nets/blocks/AttentionBlocks.py
@@ -0,0 +1,21 @@
import torch
import torch.nn as nn

from ..modules.modules import DAModule


class AttentionBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.attention = DAModule(in_channels)
self.conv1x1 = torch.nn.Conv2d(self.attention.inter_channels, 1, kernel_size=1, stride=1, padding=0)

def forward(self, input):
mask = self.attention(input)
mask = torch.sigmoid(mask)
        B, _, H, W = input.shape
        # rescale so the mask entries sum to H*W/2 per sample
        norm = 2 * torch.norm(mask, p=1, dim=(1, 2, 3))
norm = norm.reshape(B, 1, 1, 1)
mask = torch.div(mask * H * W, norm)
mask = self.conv1x1(mask)
return mask
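
The division rescales the sigmoid output so that, per sample, the mask entries sum to H*W/2 (the soft-attention normalization used in DeepPhys). A standalone check of just that step:

import torch

mask = torch.sigmoid(torch.randn(2, 8, 16, 16))
B, _, H, W = mask.shape
norm = (2 * torch.norm(mask, p=1, dim=(1, 2, 3))).reshape(B, 1, 1, 1)
normalized = mask * H * W / norm
print(normalized.sum(dim=(1, 2, 3)))   # ~tensor([128., 128.]) == H*W/2 per sample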
Empty file added nets/blocks/attenBlocks.py
22 changes: 22 additions & 0 deletions nets/blocks/attentionBlocks.py
@@ -0,0 +1,22 @@
import torch
import torch.nn as nn
from ..modules.modules import DAModule


class attention_block(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.attention = DAModule(in_channels)
self.conv1x1 = torch.nn.Conv2d(self.attention.inter_channels, 1, kernel_size=1, stride=1, padding=0)

def forward(self, input):
mask = self.attention(input)
mask = torch.sigmoid(mask)
B, _, H, W = input.shape
norm = 2 * torch.norm(mask, p=1, dim=(1, 2, 3))
norm = norm.reshape(B, 1, 1, 1)
mask = torch.div(mask * H * W, norm)
mask = self.conv1x1(mask)
return mask


Empty file added nets/blocks/attentionModel.py
78 changes: 78 additions & 0 deletions nets/blocks/blocks.py
@@ -0,0 +1,78 @@
import torch


class EncoderBlock(torch.nn.Module):
def __init__(self, in_channel, out_channel):
super(EncoderBlock, self).__init__()
self.conv_eb = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1),
torch.nn.BatchNorm2d(out_channel),
torch.nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1),
torch.nn.BatchNorm2d(out_channel),
torch.nn.MaxPool2d(kernel_size=2)
)

def forward(self, x):
out = self.conv_eb(x)
return out


class DecoderBlock(torch.nn.Module):
    def __init__(self, in_channel, out_channel, scale_factor):
        super(DecoderBlock, self).__init__()
        self.conv_db = torch.nn.Sequential(
            torch.nn.Upsample(scale_factor=scale_factor),
torch.nn.ConvTranspose2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=1,
padding=1),
torch.nn.BatchNorm2d(out_channel),
torch.nn.ConvTranspose2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1,
padding=1),
torch.nn.BatchNorm2d(out_channel)
)

def forward(self, x):
out = self.conv_db(x)
return out


class TSM(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input, n_frame=4, fold_div=3):
        B, C, H, W = input.shape
        # group n_frame consecutive frames so channels can be shifted in time
        input = input.view(-1, n_frame, C, H, W)
        fold = C // fold_div
        last_fold = C - (fold_div - 1) * fold
        out1, out2, out3 = torch.split(input, [fold, fold, last_fold], dim=2)

        # first fold: shift one frame backward, zero-padding the last frame
        padding1 = torch.zeros_like(out1[:, -1:])
        out1 = torch.cat((out1[:, 1:], padding1), dim=1)

        # second fold: shift one frame forward, zero-padding the first frame
        padding2 = torch.zeros_like(out2[:, :1])
        out2 = torch.cat((padding2, out2[:, :-1]), dim=1)

        # third fold stays unshifted
        out = torch.cat((out1, out2, out3), dim=2)
        out = out.view(-1, C, H, W)

        return out


class TSM_Block(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size):
super().__init__()
self.tsm1 = TSM()
        self.t_conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                       padding=1)  # padding=1 assumes kernel_size=3

def forward(self, input, n_frame=2, fold_div=3):
t = self.tsm1(input, n_frame, fold_div)
t = self.t_conv1(t)
return t
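
A toy check of the shift pattern, assuming the frames of one clip are stacked along the batch dimension in groups of n_frame: with fold_div=3 and 3 channels, channel 0 is shifted one frame backward, channel 1 one frame forward, and channel 2 is left in place.

import torch

tsm = TSM()
clip = torch.arange(4 * 3 * 2 * 2, dtype=torch.float32).view(4, 3, 2, 2)
shifted = tsm(clip, n_frame=4, fold_div=3)
print(torch.equal(shifted[0, 0], clip[1, 0]))   # True: backward-shifted fold
print(torch.equal(shifted[1, 1], clip[0, 1]))   # True: forward-shifted fold
print(torch.equal(shifted[2, 2], clip[2, 2]))   # True: untouched fold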
