# dataset.py (forked from clovaai/deep-text-recognition-benchmark)
import os
import sys
import re
import six
import lmdb
import torch
from PIL import Image
import numpy as np
from torch.utils.data import Dataset, ConcatDataset, Subset
from torch._utils import _accumulate
import torchvision.transforms as transforms


class Batch_Balanced_Dataset(object):

    def __init__(self, opt):
        """
        Modulate the data ratio in the batch.
        For example, when select_data is "MJ-ST" and batch_ratio is "0.5-0.5",
        50% of the batch is filled with MJ and the other 50% with ST.
        """
        print('-' * 80)
        print(f'dataset_root: {opt.train_data}\nopt.select_data: {opt.select_data}\nopt.batch_ratio: {opt.batch_ratio}')
        assert len(opt.select_data) == len(opt.batch_ratio)

        _AlignCollate = AlignCollate(imgH=opt.imgH, imgW=opt.imgW)
        self.data_loader_list = []
        self.dataloader_iter_list = []
        batch_size_list = []
        Total_batch_size = 0
        for selected_d, batch_ratio_d in zip(opt.select_data, opt.batch_ratio):
            _batch_size = max(round(opt.batch_size * float(batch_ratio_d)), 1)
            print('-' * 80)
            _dataset = hierarchical_dataset(root=opt.train_data, opt=opt, select_data=[selected_d])
            total_number_dataset = len(_dataset)

            """
            The total number of data can be modified with opt.total_data_usage_ratio.
            ex) opt.total_data_usage_ratio = 1 indicates 100% usage, and 0.2 indicates 20% usage.
            See section 4.2 of our paper.
            """
            number_dataset = int(total_number_dataset * float(opt.total_data_usage_ratio))
            dataset_split = [number_dataset, total_number_dataset - number_dataset]
            indices = range(total_number_dataset)
            _dataset, _ = [Subset(_dataset, indices[offset - length:offset])
                           for offset, length in zip(_accumulate(dataset_split), dataset_split)]
            print(f'num total samples of {selected_d}: {total_number_dataset} x {opt.total_data_usage_ratio} (total_data_usage_ratio) = {len(_dataset)}')
            print(f'num samples of {selected_d} per batch: {opt.batch_size} x {float(batch_ratio_d)} (batch_ratio) = {_batch_size}')
            batch_size_list.append(str(_batch_size))
            Total_batch_size += _batch_size

            _data_loader = torch.utils.data.DataLoader(
                _dataset, batch_size=_batch_size,
                shuffle=True,
                num_workers=int(opt.workers),
                collate_fn=_AlignCollate, pin_memory=True)
            self.data_loader_list.append(_data_loader)
            self.dataloader_iter_list.append(iter(_data_loader))

        print('-' * 80)
        print('Total_batch_size: ', '+'.join(batch_size_list), '=', str(Total_batch_size))
        opt.batch_size = Total_batch_size
        print('-' * 80)

    def get_batch(self):
        balanced_batch_images = []
        balanced_batch_texts = []

        for i, data_loader_iter in enumerate(self.dataloader_iter_list):
            try:
                image, text = next(data_loader_iter)
                balanced_batch_images.append(image)
                balanced_batch_texts += text
            except StopIteration:
                # Restart the exhausted sub-loader so every dataset keeps contributing.
                self.dataloader_iter_list[i] = iter(self.data_loader_list[i])
                image, text = next(self.dataloader_iter_list[i])
                balanced_batch_images.append(image)
                balanced_batch_texts += text
            except ValueError:
                # The sub-loader yielded an empty batch (all samples were filtered out).
                pass

        balanced_batch_images = torch.cat(balanced_batch_images, 0)
        return balanced_batch_images, balanced_batch_texts
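

# Usage sketch (not part of the original pipeline): `opt` is assumed to be an
# argparse.Namespace carrying the fields read above (train_data, select_data,
# batch_ratio, batch_size, imgH, imgW, workers, total_data_usage_ratio) plus
# the fields LmdbDataset reads (rgb, sensitive, batch_max_length, character).
def _example_balanced_batches(opt, num_iterations=5):
    train_dataset = Batch_Balanced_Dataset(opt)
    for _ in range(num_iterations):
        # images: (batch, C, imgH, imgW) float tensor in [-1, 1];
        # labels: sequence of strings concatenated across the sub-loaders.
        images, labels = train_dataset.get_batch()
        print(images.shape, len(labels))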


def hierarchical_dataset(root, opt, select_data='/'):
    """ select_data='/' selects all sub-directories of the root directory """
    dataset_list = []
    print(f'dataset_root: {root}\t dataset: {select_data[0]}')
    for dirpath, dirnames, filenames in os.walk(root):
        if not dirnames:  # a leaf directory holds one LMDB dataset
            select_flag = False
            for selected_d in select_data:
                if selected_d in dirpath:
                    select_flag = True
                    break

            if select_flag:
                dataset = LmdbDataset(dirpath, opt)
                print(f'sub-directory:\t/{os.path.relpath(dirpath, root)}\t num samples: {len(dataset)}')
                dataset_list.append(dataset)

    concatenated_dataset = ConcatDataset(dataset_list)
    return concatenated_dataset
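

# hierarchical_dataset can also be used on its own, e.g. to assemble an
# evaluation set from a single benchmark. The root path and the 'IIIT' filter
# below are illustrative assumptions, not names fixed by this file.
def _example_eval_dataset(opt):
    eval_data = hierarchical_dataset(root='data_lmdb_release/evaluation', opt=opt, select_data=['IIIT'])
    print(f'{len(eval_data)} evaluation samples')
    return eval_data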


class LmdbDataset(Dataset):

    def __init__(self, root, opt):
        self.root = root
        self.opt = opt
        self.env = lmdb.open(root, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
        if not self.env:
            print('cannot create lmdb from %s' % (root))
            sys.exit(0)

        with self.env.begin(write=False) as txn:
            nSamples = int(txn.get('num-samples'.encode()))
            self.nSamples = nSamples

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        index += 1  # LMDB keys are 1-indexed

        with self.env.begin(write=False) as txn:
            label_key = 'label-%09d'.encode() % index
            label = txn.get(label_key).decode('utf-8')
            img_key = 'image-%09d'.encode() % index
            imgbuf = txn.get(img_key)

            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                if self.opt.rgb:
                    img = Image.open(buf).convert('RGB')  # for color image
                else:
                    img = Image.open(buf).convert('L')
            except IOError:
                print(f'Corrupted image for {index}')
                return None  # filtered out later by AlignCollate

            if len(label) > self.opt.batch_max_length:
                return None  # skip labels longer than batch_max_length

            if not self.opt.sensitive:
                label = label.lower()

            # We only train and evaluate on alphanumerics (or a pre-defined character set in train.py).
            out_of_char = f'[^{self.opt.character}]'
            label = re.sub(out_of_char, '', label)

        return (img, label)
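

# A sketch of reading one sample straight from an LmdbDataset. The LMDB layout
# matches the keys used above ('num-samples', 'image-%09d', 'label-%09d'); the
# path is a placeholder.
def _example_read_one_sample(opt, lmdb_path='data_lmdb_release/training/MJ'):
    dataset = LmdbDataset(lmdb_path, opt)
    sample = dataset[0]  # may be None for a corrupted or over-long sample
    if sample is not None:
        img, label = sample
        print(img.size, label)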


class ResizeNormalize(object):

    def __init__(self, size, interpolation=Image.BICUBIC):
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img = self.toTensor(img)
        img.sub_(0.5).div_(0.5)  # map [0, 1] to [-1, 1]
        return img
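

# ResizeNormalize in isolation: any-size PIL image in, fixed-size tensor in
# [-1, 1] out. The 100 x 32 size below matches AlignCollate's defaults; the
# synthetic image is just for illustration.
def _example_resize_normalize():
    img = Image.new('L', (64, 20), color=128)  # synthetic grayscale input
    tensor = ResizeNormalize((100, 32))(img)   # size is given as (imgW, imgH)
    print(tensor.shape)  # torch.Size([1, 32, 100]), values in [-1, 1]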


class AlignCollate(object):

    def __init__(self, imgH=32, imgW=100):
        self.imgH = imgH
        self.imgW = imgW

    def __call__(self, batch):
        batch = filter(lambda x: x is not None, batch)  # drop samples LmdbDataset rejected
        images, labels = zip(*batch)

        transform = ResizeNormalize((self.imgW, self.imgH))
        image_tensors = [transform(image) for image in images]
        image_tensors = torch.cat([t.unsqueeze(0) for t in image_tensors], 0)

        return image_tensors, labels
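

# AlignCollate is designed to be passed as collate_fn, as Batch_Balanced_Dataset
# does above. A standalone sketch with a plain DataLoader over any dataset that
# yields (PIL image, label) pairs or None:
def _example_collate_loader(dataset, imgH=32, imgW=100):
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=4, shuffle=False,
        collate_fn=AlignCollate(imgH=imgH, imgW=imgW))
    images, labels = next(iter(loader))
    print(images.shape)  # e.g. torch.Size([4, 1, 32, 100]) for grayscale input
    return images, labels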


def tensor2im(image_tensor, imtype=np.uint8):
    image_numpy = image_tensor.cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))  # replicate grayscale to 3 channels
    # CHW -> HWC, then undo the [-1, 1] normalization back to [0, 255]
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)


def save_image(image_numpy, image_path):
    image_pil = Image.fromarray(image_numpy)
    image_pil.save(image_path)
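

# Round trip: normalize a PIL image with ResizeNormalize, invert the [-1, 1]
# normalization with tensor2im, and write the result to disk. The output
# filename is a placeholder.
def _example_save_normalized_image():
    img = Image.new('L', (100, 32), color=200)
    tensor = ResizeNormalize((100, 32))(img)
    recovered = tensor2im(tensor)  # (32, 100, 3) uint8; grayscale tiled to RGB
    save_image(recovered, 'example.png')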