emb-1-train.py
"""
Script used to train embedding models
"""
import json
import time
import random
import logging
import timm
import torch
import numpy as np
from tqdm import tqdm
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.cuda.amp import GradScaler, autocast
from utils.save_checkpoint import SaveCheckpoint
from utils.loss import ContrastiveLoss, TripletLoss
from utils.custom_dataloader import DatasetContrastiveLoss, DatasetTripletLoss
from utils.utils import calculate_mean_std, train_transforms, default_transforms
# ### Reproducibility
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
random.seed(42)
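# Note: some cuDNN kernels are nondeterministic; set torch.backends.cudnn.deterministic = True for stricter reproducibility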
# Timm model name
MODEL_NAME = 'convnextv2_nano' # convnextv2_nano == convnextv2_nano.fcmae_ft_in22k_in1k
IS_FLOAT16 = True
TRAIN_FOLDER = 'dataset/face-recognition-dataset/train'
# Set to an empty string to skip validation
VAL_FOLDER = 'dataset/face-recognition-dataset/valid'
# ### Resume checkpoint ###
# If an empty string, the model is downloaded from timm; if a path to a checkpoint, training resumes from it
RESUME_CHECKPOINT = ''
# Whether to also restore the training state (epoch, learning rate, best_loss, ...) from the checkpoint
CONTINUE_FROM_CHECKPOINT = False  # Requires RESUME_CHECKPOINT to be non-empty
# Whether to load all images into memory before training starts or read them from disk every epoch
LOAD_IMAGES_MEMORY = False
# If False, only train and keep the checkpoint with the lowest training loss
IS_VALID = bool(VAL_FOLDER)
SAVE_PATH = 'checkpoints'
NUM_EPOCHS = 2000
PATIENCE = 20
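# NUM_EPOCHS is an upper bound; early stopping (PATIENCE) normally ends training sooner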
BATCH_SIZE = 128
LR = 0.0001
NUM_WORKERS = 8
HIDDEN_DIM = 256
IMAGE_SIZE = 112
# contrastive or triplet
LOSS_FUNCTION = 'triplet'
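# With contrastive loss each sample is an image pair plus a same/different-class label;
# with triplet loss each sample is an (anchor, positive, negative) triplet and no label is used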
SAME_CLASS_PROBABILITY = 0.4 # Only for Contrastive loss
OPTIMIZER = 'adamW' # adam adamW sgd
# ### Scheduler ###
SCHEDULER = 'stepLR' # '' plateau stepLR
SCHEDULER_PATIENCE = 4
SCHEDULER_GAMMA = 0.95
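# Note: for 'plateau' SCHEDULER_PATIENCE is the plateau patience; for 'stepLR' it is reused as step_size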
# Whether to calculate a custom MEAN and STD for the dataset or use the ImageNet defaults
IS_CALCULATE_MEAN_STD = False
device = 'cuda' if torch.cuda.is_available() else 'cpu'
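# Best metrics seen so far; updated with the dict returned by SaveCheckpoint.save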
best_results = {
'best_epoch': 0,
'train_best_loss': np.inf,
'val_best_loss': np.inf
}
start_epoch = 1
if RESUME_CHECKPOINT:
checkpoint = torch.load(RESUME_CHECKPOINT)
# In case the model was saved as float16 (half), convert it to float32
model = checkpoint['model'].to(torch.float)
    with open(RESUME_CHECKPOINT.replace('.pth', '_results.json')) as f:
        results_json = json.load(f)
MODEL_NAME = results_json['model_name']
if CONTINUE_FROM_CHECKPOINT:
LR = results_json['learning_rate']
LOSS_FUNCTION = results_json['loss_function']
MEAN = results_json['mean']
STD = results_json['std']
HIDDEN_DIM = results_json['hidden_dim']
IMAGE_SIZE = results_json['image_size']
start_epoch = results_json['best_epoch'] + 1
NUM_EPOCHS += start_epoch
best_results['best_epoch'] = start_epoch
best_results['train_best_loss'] = results_json['train_best_loss']
best_results['val_best_loss'] = results_json['val_best_loss'] if results_json['val_best_loss'] else np.inf
else:
model = timm.create_model(MODEL_NAME, pretrained=True)
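    # Replace the classification head with a linear projection to HIDDEN_DIM so the model outputs embeddings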
in_feat = model.head.fc.in_features
model.head.fc = torch.nn.Linear(in_feat, HIDDEN_DIM)
for param in model.parameters():
param.requires_grad = True
if not RESUME_CHECKPOINT or not CONTINUE_FROM_CHECKPOINT:
if IS_CALCULATE_MEAN_STD:
MEAN, STD = calculate_mean_std(TRAIN_FOLDER, IMAGE_SIZE)
else:
# [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
MEAN, STD = timm.data.IMAGENET_DEFAULT_MEAN, timm.data.IMAGENET_DEFAULT_STD
if LOSS_FUNCTION == 'contrastive':
criterion = ContrastiveLoss()
train_dataset = DatasetContrastiveLoss(TRAIN_FOLDER, transform=train_transforms(MEAN, STD, IMAGE_SIZE, 'embedding'),
same_class_probability=SAME_CLASS_PROBABILITY,
load_images_memory=LOAD_IMAGES_MEMORY)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
if IS_VALID:
val_dataset = DatasetContrastiveLoss(VAL_FOLDER, transform=default_transforms(MEAN, STD, IMAGE_SIZE),
same_class_probability=SAME_CLASS_PROBABILITY,
load_images_memory=LOAD_IMAGES_MEMORY)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
elif LOSS_FUNCTION == 'triplet':
criterion = TripletLoss()
train_dataset = DatasetTripletLoss(TRAIN_FOLDER, transform=train_transforms(MEAN, STD, IMAGE_SIZE, 'embedding'),
load_images_memory=LOAD_IMAGES_MEMORY, onehot='abs')
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
if IS_VALID:
val_dataset = DatasetTripletLoss(VAL_FOLDER, transform=default_transforms(MEAN, STD, IMAGE_SIZE),
load_images_memory=LOAD_IMAGES_MEMORY)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
else:
raise ValueError(f'Invalid LOSS_FUNCTION ({LOSS_FUNCTION})')
# Prepare files and folders to save logging and checkpoints
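# SaveCheckpoint also writes a companion _results.json next to each .pth (read back above when resuming)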
save_checkpoint = SaveCheckpoint(SAVE_PATH, MODEL_NAME, 'embedding', RESUME_CHECKPOINT, MEAN, STD, HIDDEN_DIM, IMAGE_SIZE,
LOSS_FUNCTION, IS_FLOAT16)
if OPTIMIZER == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=LR)
elif OPTIMIZER == 'adam':
optimizer = optim.Adam(model.parameters(), lr=LR)
elif OPTIMIZER == 'adamW':
optimizer = optim.AdamW(model.parameters(), lr=LR)
else:
raise ValueError(f'Invalid OPTIMIZER ({OPTIMIZER})')
if SCHEDULER == '':
scheduler = None
elif SCHEDULER == 'plateau':
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, factor=SCHEDULER_GAMMA, patience=SCHEDULER_PATIENCE,
        mode='min'  # The monitored metric is a loss (val loss if IS_VALID, else train loss), so lower is better
    )
elif SCHEDULER == 'stepLR':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=SCHEDULER_GAMMA, step_size=SCHEDULER_PATIENCE)
else:
raise ValueError(f'Invalid SCHEDULER ({SCHEDULER})')
# For float16 (half): GradScaler rescales the loss so small float16 gradients do not underflow
scaler = None
if IS_FLOAT16:
    scaler = GradScaler()
model.to(device)
start_training_time = time.time()
no_improvement_count = 0
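# Early-stopping counter: reset to 0 whenever a new best checkpoint is saved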
try:
for epoch in range(start_epoch, NUM_EPOCHS):
tic = time.time()
model.train()
train_running_loss = 0.0
train_loader_tqdm = tqdm(train_loader, desc=f'Train: Epoch [{epoch}/{NUM_EPOCHS}]', leave=False)
        for data in train_loader_tqdm:
            optimizer.zero_grad()
            # Forward pass: one embedding per image in the pair (contrastive) or triplet.
            # autocast(enabled=...) runs the forward in float16 only when IS_FLOAT16 is set
            with autocast(enabled=IS_FLOAT16):
                embeddings = [model(imgs.to(device)) for imgs in data['imgs']]
            # Contrastive loss also takes the same/different label; triplet loss takes only embeddings
            if len(data['label']) > 0:
                loss = criterion(*embeddings, data['label'].to(device))
            else:
                loss = criterion(*embeddings)
if IS_FLOAT16:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
train_running_loss += loss.item()
train_loader_tqdm.set_postfix({'batch_loss': loss.item()})
train_loader_tqdm.close()
train_epoch_loss = train_running_loss / len(train_loader)
        if not IS_VALID and train_epoch_loss < best_results['train_best_loss']:
            logging.info(f'##### New best train loss - before: {best_results["train_best_loss"]} #####')
            no_improvement_count = 0
            best_results = save_checkpoint.save(model, epoch, optimizer, train_epoch_loss)
        logging.info(f'Epoch [{epoch}/{NUM_EPOCHS}] (time: {int(time.time() - tic)}), Train Loss: {train_epoch_loss:.5f}, Lr: {optimizer.param_groups[0]["lr"]}')
if IS_VALID:
model.eval()
val_running_loss = 0.0
            val_loader_tqdm = tqdm(val_loader, desc=f'Val: Epoch [{epoch}/{NUM_EPOCHS}]', leave=False)
for data in val_loader_tqdm:
                # No gradients are needed for validation; autocast mirrors the training precision
                with torch.inference_mode():
                    with autocast(enabled=IS_FLOAT16):
                        embeddings = [model(img.to(device)) for img in data['imgs']]
if len(data['label']) > 0:
loss = criterion(*embeddings, data['label'].to(device))
else:
loss = criterion(*embeddings)
val_running_loss += loss.item()
val_loader_tqdm.set_postfix({'batch_loss': loss.item()})
val_loader_tqdm.close()
val_epoch_loss = val_running_loss / len(val_loader)
            if val_epoch_loss < best_results['val_best_loss']:
                logging.info(f'##### New best val loss - before: {best_results["val_best_loss"]} #####')
                no_improvement_count = 0
                best_results = save_checkpoint.save(model, epoch, optimizer, train_epoch_loss,
                                                    val_epoch_loss=val_epoch_loss)
            logging.info(f'Epoch [{epoch}/{NUM_EPOCHS}], Val Loss: {val_epoch_loss:.5f}')
if no_improvement_count >= PATIENCE:
logging.info(f'Early stopping: No improvement for {PATIENCE} consecutive epochs')
break
no_improvement_count += 1
        # Step the learning-rate scheduler, if one is configured
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            # ReduceLROnPlateau must be stepped with the metric it monitors
            scheduler.step(val_epoch_loss if IS_VALID else train_epoch_loss)
        elif scheduler is not None:
            scheduler.step()
except KeyboardInterrupt:
logging.warning('Keyboard Interrupt: Stopping training')
logging.info(f'Training completed in {int((time.time() - start_training_time) / 60)} minutes - '
f'Best Train Loss: {best_results["train_best_loss"]} - '
f'Best Val Loss: {best_results["val_best_loss"]}')