aligned_dataset.py (forked from giddyyupp/ganilla)
import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image


class AlignedDataset(BaseDataset):
    """Paired dataset for aligned image-to-image translation.

    Each file under <dataroot>/<phase> is expected to be a single image with
    domain A on the left half and domain B on the right half.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        return parser

    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)
        self.AB_paths = sorted(make_dataset(self.dir_AB))
        assert(opt.resize_or_crop == 'resize_and_crop')

    def __getitem__(self, index):
        # Load the combined AB image and split it into its left/right halves,
        # resizing each half to loadSize x loadSize.
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        w, h = AB.size
        assert(self.opt.loadSize >= self.opt.fineSize)
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC)
        B = AB.crop((w2, 0, w, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC)
        A = transforms.ToTensor()(A)
        B = transforms.ToTensor()(B)

        # Random crop of size fineSize, applied at the same offset to A and B.
        w_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1))
        h_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1))
        A = A[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]
        B = B[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]

        # Scale pixel values from [0, 1] to [-1, 1].
        A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
        B = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)

        if self.opt.direction == 'BtoA':
            input_nc = self.opt.output_nc
            output_nc = self.opt.input_nc
        else:
            input_nc = self.opt.input_nc
            output_nc = self.opt.output_nc

        # Random horizontal flip, applied identically to A and B.
        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = [i for i in range(A.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)

        if input_nc == 1:  # RGB to gray
            tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
            A = tmp.unsqueeze(0)

        if output_nc == 1:  # RGB to gray
            tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
            B = tmp.unsqueeze(0)

        return {'A': A, 'B': B,
                'A_paths': AB_path, 'B_paths': AB_path}

    def __len__(self):
        return len(self.AB_paths)

    def name(self):
        return 'AlignedDataset'
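

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). It assumes an options
# object exposing the attributes read above; the dataset path and size values
# below are illustrative placeholders, not the defaults shipped by the repo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from argparse import Namespace

    opt = Namespace(
        dataroot='./datasets/example_AB',   # hypothetical folder of side-by-side AB images
        phase='train',
        resize_or_crop='resize_and_crop',
        loadSize=286, fineSize=256,         # resize to 286, then random-crop 256
        direction='AtoB',
        input_nc=3, output_nc=3,
        no_flip=False,
    )

    dataset = AlignedDataset()
    dataset.initialize(opt)
    sample = dataset[0]
    # Expected with the values above: two tensors of shape [3, 256, 256].
    print(len(dataset), sample['A'].shape, sample['B'].shape)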