Skip to content

Commit

Permalink
add train/validation/test model in main.py
Browse files Browse the repository at this point in the history
  • Loading branch information
SpicyYeol committed Jul 26, 2021
1 parent 1d8df4c commit 2edaaa1
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 18 deletions.
57 changes: 44 additions & 13 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import torch
import os


from tqdm import tqdm
from parallel import DataParallelModel, DataParallelCriterion
from nets.Models import Deepphys
from loss import loss_fn
Expand All @@ -22,6 +22,8 @@
params = jsonObject.get("params")
hyper_params = jsonObject.get("hyper_params")

#

'''
Generate preprocessed data hpy file
'''
Expand Down Expand Up @@ -106,10 +108,10 @@
print(hyper_params["loss_fn_comment"])
raise NotImplementedError("implement a custom function(%s) in loss.py" % hyper_params["loss_fn"])
# if torch.cuda.is_available():
# TODO: implement parallel training
# if options["parallel_criterion"] :
# print(options["parallel_criterion_comment"])
# criterion = DataParallelCriterion(criterion,device_ids=[0, 1, 2])
# TODO: implement parallel training
# if options["parallel_criterion"] :
# print(options["parallel_criterion_comment"])
# criterion = DataParallelCriterion(criterion,device_ids=[0, 1, 2])

if __TIME__:
print("setting loss func time \t: ", datetime.timedelta(seconds=time.time() - start_time))
Expand All @@ -118,19 +120,48 @@
'''
if __TIME__:
start_time = time.time()
optimizer = optimizer(model.parameters(), hyper_params["learning_rate"],hyper_params["optimizer"])
optimizer = optimizer(model.parameters(), hyper_params["learning_rate"], hyper_params["optimizer"])
if criterion is None:
print("use implemented optimizer")
print(hyper_params["optimizer_comment"])
raise NotImplementedError("implement a custom optimizer(%s) in optimizer.py" % hyper_params["optimizer"])
if __TIME__:
print("setting optimizer time \t: ", datetime.timedelta(seconds=time.time() - start_time))

for i,(appearance_data, motion_data, target) in enumerate(train_loader):
print(i)
outputs = model(appearance_data,motion_data)
loss = criterion(outputs,target)
# Train for hyper_params["epochs"] epochs; validate every epoch and run the
# held-out test set once, after the final epoch.
for epoch in range(hyper_params["epochs"]):
    # --- Training phase ---
    with tqdm(train_loader, desc="Train ", total=len(train_loader)) as tepoch:
        model.train()
        running_loss = 0.0
        for appearance_data, motion_data, target in tepoch:
            tepoch.set_description(f"Train Epoch {epoch}")
            outputs = model(appearance_data, motion_data)
            loss = criterion(outputs, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # NOTE(review): this shows cumulative loss / batch size, not the
            # mean loss per batch — confirm that is the intended display metric.
            tepoch.set_postfix(loss=running_loss / params["train_batch_size"])

    # --- Validation phase (inference only, no gradient updates) ---
    with tqdm(validation_loader, desc="Validation ", total=len(validation_loader)) as tepoch:
        model.eval()
        running_loss = 0.0
        with torch.no_grad():
            for appearance_data, motion_data, target in tepoch:
                tepoch.set_description(f"Validation")
                outputs = model(appearance_data, motion_data)
                loss = criterion(outputs, target)
                running_loss += loss.item()
                tepoch.set_postfix(loss=running_loss / params["train_batch_size"])
    # BUG FIX: the original called optimizer.zero_grad() / loss.backward() /
    # optimizer.step() here, after the validation loop. backward() on a loss
    # produced under torch.no_grad() raises a RuntimeError, and the model must
    # never be updated from validation data — the three lines were removed.

    # --- Test phase: run once, on the last epoch only ---
    if epoch + 1 == hyper_params["epochs"]:
        with tqdm(test_loader, desc="test ", total=len(test_loader)) as tepoch:
            model.eval()
            # BUG FIX: reset the accumulator; the original carried the
            # validation running_loss into the test-loss display.
            running_loss = 0.0
            with torch.no_grad():
                for appearance_data, motion_data, target in tepoch:
                    tepoch.set_description(f"test")
                    outputs = model(appearance_data, motion_data)
                    loss = criterion(outputs, target)
                    running_loss += loss.item()
                    tepoch.set_postfix(loss=running_loss / (params["train_batch_size"] / params["test_batch_size"]))
11 changes: 8 additions & 3 deletions nets/Models.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ def __init__(self):
self.in_channels = 3
self.out_channels = 32
self.kernel_size = 3
self.attention_mask1 = None
self.attention_mask2 = None

self.appearance_model = AppearanceModel_2D(in_channels=self.in_channels, out_channels=self.out_channels * 4,
kernel_size=self.kernel_size)
Expand All @@ -25,11 +27,14 @@ def forward(self, appearance_input, motion_input):
:return:
original 2d model
"""
attention_mask1, attention_mask2 = self.appearance_model(appearance_input)
motion_output = self.motion_model(motion_input, attention_mask1, attention_mask2)
self.attention_mask1, self.attention_mask2 = self.appearance_model(appearance_input)
motion_output = self.motion_model(motion_input, self.attention_mask1, self.attention_mask2)
out = self.linear_model(motion_output)

return out, attention_mask1, attention_mask2
return out

def get_attention_mask(self):
    """Return the attention masks cached by the most recent forward() call.

    Returns:
        tuple: ``(attention_mask1, attention_mask2)`` as produced by the
        appearance model; both are ``None`` until ``forward()`` has run
        at least once (they are initialized to ``None`` in ``__init__``).
    """
    return self.attention_mask1, self.attention_mask2


class Deepphys_DA(torch.nn.Module):
Expand Down
5 changes: 3 additions & 2 deletions params.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
"train_ratio_comment" : "generate train dataset using train_ratio",
"validation_ratio": 0.9,
"validation_ratio_comment" : "split train dataset using validation_ratio",
"train_batch_size" : 10,
"train_batch_size" : 32,
"train_shuffle" : 0,
"test_batch_size" : 1,
"test_shuffle" : 0
Expand All @@ -35,7 +35,8 @@
"adam","sgd","rms_prop","ada_delta","ada_grad","ada_max",
"ada_mw","a_sgd","lbfgs","n_adam","r_adam","rprop","sparse_adam"
],
"learning_rate": 0.1
"learning_rate": 1,
"epochs" : 1

}
}

0 comments on commit 2edaaa1

Please sign in to comment.