Skip to content

Commit

Permalink
dataloader problem (MetaPhys_task_Dataset)
Browse files Browse the repository at this point in the history
  • Loading branch information
yoojin committed Aug 24, 2021
1 parent f55b996 commit 0a10860
Show file tree
Hide file tree
Showing 6 changed files with 97 additions and 28 deletions.
49 changes: 46 additions & 3 deletions dataset/MetaPhysDataset.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import random

import torch
import numpy as np
from torchmeta.utils.data import Task, MetaDataset
Expand Down Expand Up @@ -25,9 +27,6 @@ def __init__(self, num_shots_tr, num_shots_ts, option='train',
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.tolist()

self.dataset_transform = ClassSplitter(shuffle=False, num_train_per_class=self.num_shots_tr,
num_test_per_class=self.num_shots_ts)
ap = []
mo = []
la = []
Expand All @@ -39,6 +38,8 @@ def __getitem__(self, index):

task = PersonTask(ap, mo, la)

self.dataset_transform = ClassSplitter(shuffle=False, num_train_per_class=self.num_shots_tr,
num_test_per_class=self.num_shots_ts)
if self.dataset_transform is not None:
task = self.dataset_transform(task)

Expand All @@ -47,10 +48,52 @@ def __getitem__(self, index):
def __len__(self):
return len(self.label)

class MetaPhys_task_Dataset(MetaDataset):
    """Meta-dataset yielding one person/subject task per index for MetaPhys.

    Each item gathers a fixed number of clips for subject ``index`` from the
    appearance/motion/label arrays, wraps them in a :class:`PersonTask`, and
    splits that task into support ("train") and query ("test") sets with a
    :class:`ClassSplitter`.

    Parameters
    ----------
    num_shots_tr : int
        Number of support (inner-loop) samples per task.
    num_shots_ts : int
        Number of query (outer-loop) samples per task.
    option : str
        Dataset split tag, e.g. ``'train'`` (stored, not used here).
    unsupervised : int
        Unsupervised-mode flag (stored, not used here).
    frame_depth : int
        Frames per TS-CAN chunk (stored, not used here).
    appearance_data, motion_data, target : array-like
        Indexed as ``data[subject][clip]``; presumably shaped
        (num_subjects, num_clips, ...) — TODO confirm against dataset_loader.
    """

    # Clips gathered per subject when building a task. The original code
    # hard-coded range(4); kept as a named class attribute for visibility.
    NUM_CLIPS_PER_TASK = 4

    def __init__(self, num_shots_tr, num_shots_ts, option='train',
                 unsupervised=0, frame_depth=10,
                 appearance_data=None, motion_data=None, target=None):
        # NOTE(review): super().__init__() is not called, matching the
        # original; MetaDataset state is therefore left uninitialized.
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.num_samples_per_task = num_shots_tr + num_shots_ts
        self.frame_depth = frame_depth
        self.option = option
        self.num_shots_tr = num_shots_tr
        self.num_shots_ts = num_shots_ts
        self.unsupervised = unsupervised
        self.a = appearance_data
        self.m = motion_data
        self.label = target
        # Build the support/query splitter ONCE. Its parameters never change,
        # so recreating an identical splitter on every __getitem__ call (as
        # the original did) was pure overhead and made the `is not None`
        # guard below dead code.
        self.dataset_transform = ClassSplitter(shuffle=False, num_train_per_class=num_shots_tr,
                                               num_test_per_class=num_shots_ts)

    def __getitem__(self, index):
        """Return the split task for subject ``index``."""
        if torch.is_tensor(index):
            index = index.tolist()

        ap = []
        mo = []
        la = []
        for i in range(self.NUM_CLIPS_PER_TASK):
            ap.append(self.a[index][i])
            mo.append(self.m[index][i])
            la.append(self.label[index][i])

        task = PersonTask(ap, mo, la)

        # Splitter was constructed in __init__; apply it if present.
        if self.dataset_transform is not None:
            task = self.dataset_transform(task)

        return task

    def __len__(self):
        # One task per subject; `target` is indexed per subject.
        return len(self.label)

class PersonTask(Task):
def __init__(self, a, m ,label):
super(PersonTask, self).__init__(None, None) # Regression task
self.transform = transforms.Compose([transforms.ToTensor()])
self.a = a
self.m = m
self.label = label
Expand Down
33 changes: 20 additions & 13 deletions dataset/dataset_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

from dataset.DeepPhysDataset import DeepPhysDataset
from dataset.PhysNetDataset import PhysNetDataset
from dataset.MetaPhysDataset import MetaPhysDataset
from dataset.MetaPhysDataset import MetaPhysDataset, MetaPhys_task_Dataset

def dataset_loader(save_root_path: str = "/media/hdd1/dy_dataset/",
model_name: str = "DeepPhys",
Expand Down Expand Up @@ -71,25 +71,32 @@ def dataset_loader(save_root_path: str = "/media/hdd1/dy_dataset/",
)

if model_name == "MetaPhys_task":
appearance_data = []
motion_data = []
target_data = []
print(hpy_file.keys())
for key in hpy_file.keys(): #subject1, subject10, ...
appearance_data.append(hpy_file[key]['preprocessed_video'][:, :, :, -3:])
motion_data.append(hpy_file[key]['preprocessed_video'][:, :, :, :3])
target_data.append(hpy_file[key]['preprocessed_label'][:])
appearance_data_all = []
motion_data_all = []
target_data_all = []

for key in hpy_file.keys(): #1, 2, ...
appearance_data = []
motion_data = []
target_data = []
for data in hpy_file[key]: #1/1, 1/2, ...
appearance_data.append(hpy_file[key][data]['preprocessed_video'][:, :, :, -3:])
motion_data.append(hpy_file[key][data]['preprocessed_video'][:, :, :, :3])
target_data.append(hpy_file[key][data]['preprocessed_label'][:])
appearance_data_all.append(appearance_data) #np.asarray(
motion_data_all.append(motion_data)
target_data_all.append(target_data)
hpy_file.close()

dataset = MetaPhysDataset(num_shots,
dataset = MetaPhys_task_Dataset(num_shots,
num_test_shots,
option,
unsupervised,
frame_depth=10,

appearance_data=np.asarray(appearance_data),
motion_data=np.asarray(motion_data),
target=np.asarray(target_data)
appearance_data=np.asarray(appearance_data_all),
motion_data=np.asarray(motion_data_all),
target=np.asarray(target_data_all)
)

return dataset
2 changes: 1 addition & 1 deletion meta_params.json
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@
},
"meta_params":
{
"num_shots" : 6,
"num_shots" : 2,
"num_test_shots" : 2,
"fs" : 30,
"fs_comment" : "sampling rate of dataset",
Expand Down
4 changes: 2 additions & 2 deletions models.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def get_model(model_name: str = "DeepPhys"):
return DeepPhys_DA()
elif model_name == "PhysNet":
return PhysNet()
elif model_name == "MetaPhys":
elif model_name == "MetaPhys" or "MetaPhys_task":
return TSCAN()
#elif model_name == "PhysNet_LSTM":
#return PhysNet_2DCNN_LSTM()
Expand Down Expand Up @@ -54,7 +54,7 @@ def summary(model, model_name):
#elif model_name == "PhysNet" or model_name == "PhysNet_LSTM":
# torchsummary.summary(model, (3, 32, 128, 128))
# torchinfo.summary(model,(1, 3, 32, 128, 128))
elif model_name == "MetaPhys":
elif model_name == "MetaPhys" or "MetaPhys_task":
print('rrrr')
else:
log_warning("use implemented model")
Expand Down
18 changes: 10 additions & 8 deletions nets/models/MetaPhys.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def maml_train(tepoch, model, inner_criterion, outer_criterion, inner_optimizer,
inputs, targets = batch['train']
test_inputs, test_targets = batch['test']

test_losses = 0.0
test_losses = []
optimizer.zero_grad()
for task_idx, (input, target, test_input, test_target) in enumerate(
zip(inputs, targets, test_inputs, test_targets)):
Expand All @@ -66,15 +66,16 @@ def maml_train(tepoch, model, inner_criterion, outer_criterion, inner_optimizer,
diffopt.step(inner_loss)
test_logit = fmodel(test_input)
test_loss = outer_criterion(test_logit, test_target)
test_losses +=test_loss.item()
test_losses.append(test_loss.detach())
test_loss.backward()

optimizer.step()
losses = test_losses / len(tepoch)
losses = sum(test_losses) / len(tepoch)
tepoch.set_postfix(loss=losses)

def maml_val(tepoch, model, inner_criterion, outer_criterion, inner_optimizer, num_adapt_steps):
model.train()
test_losses = 0.0
test_losses = []
for batch in tepoch:
tepoch.set_description(f"Validation")

Expand All @@ -94,8 +95,9 @@ def maml_val(tepoch, model, inner_criterion, outer_criterion, inner_optimizer, n
diffopt.step(inner_loss)
test_logit = fmodel(test_input).detach()
test_loss = outer_criterion(test_logit, test_target)
test_losses +=test_loss.item()
losses = test_losses / len(tepoch)
test_losses.append(test_loss.detach())

losses = sum(test_losses) / len(tepoch)
tepoch.set_postfix(loss=losses)
'''
if min_val_loss > test_losses: # save the train model
Expand All @@ -113,7 +115,7 @@ def maml_val(tepoch, model, inner_criterion, outer_criterion, inner_optimizer, n

def maml_test(tepoch, model, inner_criterion, outer_criterion, inner_optimizer, num_adapt_steps):
model.train()
test_losses = 0.0
mean_test_loss = torch.tensor(0., device='cuda:9')
inference_array = []
target_array = []
for batch in tepoch:
Expand All @@ -135,7 +137,7 @@ def maml_test(tepoch, model, inner_criterion, outer_criterion, inner_optimizer,
diffopt.step(inner_loss)
test_logit = fmodel(test_input).detach()
test_loss = outer_criterion(test_logit, test_target)
test_losses += test_loss.item()
mean_test_loss += test_loss.item()

inference_array.extend(test_logit.cpu().numpy())
target_array.extend(test_target.cpu().numpy())
Expand Down
19 changes: 18 additions & 1 deletion utils/text_preprocess.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import numpy as np

import h5py

def Deepphys_preprocess_Label(path):
'''
Expand Down Expand Up @@ -45,3 +45,20 @@ def PhysNet_preprocess_Label(path):
f.close()

return split_raw_label

def cohface_Label(path, frame_total):
    """Load a COHFACE pulse label and return its standardized first difference.

    The raw ``pulse`` signal stored in the HDF5 file at ``path`` is resampled
    (linear interpolation) to ``len(frame_total) + 1`` points so that its
    first difference lines up with the video frames, then differenced and
    standardized to zero mean / unit variance.

    Parameters
    ----------
    path : str
        Path to an HDF5 file containing a ``'pulse'`` dataset.
    frame_total : sized
        Frame sequence of the matching video; only its length is used.
        # NOTE(review): presumably a list/array of frames — confirm callers.

    Returns
    -------
    numpy.ndarray
        float32 array of length ``len(frame_total)`` with the normalized
        frame-to-frame pulse deltas.
    """
    # Context manager guarantees the file is closed even if reading raises.
    with h5py.File(path, "r") as f:
        label = np.asarray(f['pulse'])

    # Resample the pulse to one sample per frame boundary.
    label = np.interp(np.arange(0, len(frame_total) + 1),
                      np.linspace(0, len(frame_total) + 1, num=len(label)),
                      label)

    # First difference, standardized. np.diff replaces the original manual
    # append loop; the original's final `.copy()` (flagged by its own
    # "why is this here?" comment) was a no-op and is dropped.
    # NOTE(review): a constant pulse segment gives std == 0 and yields
    # NaN/inf here, exactly as in the original — confirm inputs exclude it.
    delta_label = np.diff(label)
    delta_label = delta_label - np.mean(delta_label)
    delta_label = delta_label / np.std(delta_label)

    return delta_label.astype('float32')

0 comments on commit 0a10860

Please sign in to comment.