# config.yaml
# '''
# First step:
# Pre-train the coarse model (i.e. E_c) for two epochs with a batch size of 64,
# using lambda_lmk = 1e-4, lambda_eye = 1.0, lambda_beta = 1e-4, and lambda_psi = 1e-4.
# Why:
# Training with only the landmark loss gives a good initialization;
# the photometric loss needs a good initialization in both regression and
# optimization, and it also requires differentiable rendering, which slows
# training down. (A hedged sketch of this staged weighting follows the config.)
# '''
output_dir: "./training_logs"
pretrained_modelpath: './data/mica_combined_ft0_100k.tar'
dataset:
# training_data: ['vggface2', 'vox2']
training_data: ['celebahq']
eval_data: ['now']
batch_size: 1
  K: 1 # images sampled per subject in each batch
loss:
  photo: 0.1 # photometric loss weight
  id: 0. # identity loss weight (disabled)
  useSeg: False # use segmentation masks
  reg_tex: 0. # texture regularization weight (disabled)
  reg_light: 0. # lighting regularization weight (disabled)
  shape_consistency: False
  lmk: 5.0 # landmark loss weight
train:
resume: True
max_epochs: 500000
max_steps: 100000
log_steps: 10
vis_steps: 500
checkpoint_steps: 1000
val_steps: 500
eval_steps: 1000
  use_mica: True # enables MICA training
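
A minimal sketch of consuming this file, assuming PyYAML; the keys and values are taken from this config, but the repository may well use its own config loader, so treat the snippet as illustrative only:

import yaml  # PyYAML; an assumption, the repo may use a different loader

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["output_dir"])               # ./training_logs
print(cfg["dataset"]["training_data"]) # ['celebahq']
print(cfg["loss"]["lmk"])              # 5.0
print(cfg["train"]["use_mica"])        # True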
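
The header comment describes a staged schedule: landmark loss only during pre-training, with the photometric (and other) terms enabled afterwards. Below is a hedged sketch of how the weights under loss: might be combined under that schedule; the loss functions are simple stand-ins and every tensor and function name is hypothetical, not the repository's actual API:

import torch
import torch.nn.functional as F

def total_loss(pred, target, w, pretraining=False):
    # The landmark term is always active and is the only term in pre-training.
    loss = w["lmk"] * F.mse_loss(pred["lmks"], target["lmks"])
    if not pretraining:
        # The photometric term needs differentiable rendering and a good
        # initialization, so it is switched on only after pre-training.
        loss = loss + w["photo"] * F.l1_loss(pred["render"], target["image"])
        if w["id"] > 0:  # identity term is disabled in this config (id: 0.)
            loss = loss + w["id"] * F.mse_loss(pred["id_feat"], target["id_feat"])
    return loss

# Dummy example using the weights from this config:
w = {"photo": 0.1, "id": 0.0, "lmk": 5.0}
pred = {"lmks": torch.rand(1, 68, 2), "render": torch.rand(1, 3, 224, 224),
        "id_feat": torch.rand(1, 512)}
target = {"lmks": torch.rand(1, 68, 2), "image": torch.rand(1, 3, 224, 224),
          "id_feat": torch.rand(1, 512)}
print(total_loss(pred, target, w, pretraining=True).item())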