diffqe_librispeech_5.yaml
# @package _global_
sampling_rate: 16000
length: 131072
log_every_n_steps: 1000
channels: 1
patch_size: 8
factors: [4, 4, 4]
encoder_channels: 256
encoder_depth: 3
quantizer_type: block
quantizer_split_size: 4
quantizer_codebook_size: 8192
quantizer_num_residuals: 4
quantizer_num_groups: 1
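
# Latent resolution: assuming the encoder downsamples by patch_size * prod(factors)
# = 8 * (4 * 4 * 4) = 512x (an assumption about the architecture, not a value read
# from this file), each 131072-sample crop yields 256 latent frames,
# i.e. 16000 / 512 = 31.25 latent frames per second.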

model:
  _target_: main.module_diffqe.Model
  lr: 1e-4
  lr_beta1: 0.95
  lr_beta2: 0.999
  lr_eps: 1e-6
  lr_weight_decay: 1e-3
  use_scheduler: False
  scheduler_inv_gamma: 20000.0
  scheduler_power: 1.0
  scheduler_warmup: 0.99
  in_channels: ${channels}
  channels: 128
  patch_size: ${patch_size}
  resnet_groups: 8
  kernel_multiplier_downsample: 2
  kernel_sizes_init: [1, 3, 7]
  multipliers: [1, 2, 4, 4]
  factors: ${factors}
  num_blocks: [2, 2, 2]
  attentions: [False, False, False]
  use_nearest_upsample: False
  use_skip_scale: True
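
  # Diffusion training settings: noise levels are sampled from a log-normal
  # distribution over sigma (audio_diffusion_pytorch.LogNormalDistribution),
  # with the sigma_data scale used for Karras/EDM-style preconditioning.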
  diffusion_sigma_distribution:
    _target_: audio_diffusion_pytorch.LogNormalDistribution
    mean: -3.0
    std: 1.0
  diffusion_sigma_data: 0.1
  diffusion_dynamic_threshold: 0.0
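
  # Bottleneck encoder and quantizer; values are interpolated from the top-level
  # globals so the quantization_info callback below reports matching settings.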
  encoder_channels: ${encoder_channels}
  encoder_depth: ${encoder_depth}
  quantizer_type: ${quantizer_type}
  quantizer_codebook_size: ${quantizer_codebook_size}
  quantizer_num_residuals: ${quantizer_num_residuals}
  quantizer_loss_weight: 1e-5
  quantizer_groups: ${quantizer_num_groups}
  quantizer_split_size: ${quantizer_split_size}
  quantizer_expire_threshold: 0.1
  quantizer_shared_codebook: True
  quantizer_ema_decay: 0.9
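
# LibriSpeech pipeline: resample to ${sampling_rate} (a no-op here, since the source
# is already 16 kHz), take random ${length}-sample crops, and normalize loudness to
# -20 (presumably LUFS, via audio_data_pytorch); 0.1% of the data is held out for
# validation.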
datamodule:
  _target_: main.module_diffqe.Datamodule
  dataset:
    _target_: audio_data_pytorch.LibriSpeechDataset
    root: ${data_dir}
    transforms:
      _target_: audio_data_pytorch.AllTransform
      source_rate: 16000
      target_rate: ${sampling_rate}
      random_crop_size: ${length}
      loudness: -20
  val_split: 0.001
  batch_size: 24
  num_workers: 8
  pin_memory: True
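
# Callbacks: keep the best checkpoint by valid_loss, log decoded audio samples at
# several diffusion step counts, and log quantization/codebook statistics.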
callbacks:
  rich_progress_bar:
    _target_: pytorch_lightning.callbacks.RichProgressBar

  model_checkpoint:
    _target_: pytorch_lightning.callbacks.ModelCheckpoint
    monitor: "valid_loss"  # name of the logged metric which determines when the model is improving
    save_top_k: 1          # save k best models (determined by the metric above)
    save_last: True        # additionally, always save the model from the last epoch
    mode: "min"            # can be "max" or "min"
    verbose: False
    dirpath: ${logs_dir}/ckpts/${now:%Y-%m-%d-%H-%M-%S}
    filename: '{epoch:02d}-{valid_loss:.3f}'

  model_summary:
    _target_: pytorch_lightning.callbacks.RichModelSummary
    max_depth: 2

  audio_samples_logger:
    _target_: main.module_diffqe.SampleLogger
    num_items: 2
    channels: ${channels}
    sampling_rate: ${sampling_rate}
    length: ${length}
    sampling_steps: [3, 10, 25, 50]
    diffusion_sampler:
      _target_: audio_diffusion_pytorch.ADPM2Sampler
      rho: 1.0
    diffusion_schedule:
      _target_: audio_diffusion_pytorch.KarrasSchedule
      sigma_min: 0.0001
      sigma_max: 3.0
      rho: 9.0

  quantization_info:
    _target_: main.module_diffqe.QuantizationInfoLogger
    sample_rate: ${sampling_rate}
    patch_size: ${patch_size}
    split_size: ${quantizer_split_size}
    num_residuals: ${quantizer_num_residuals}
    num_groups: ${quantizer_num_groups}
    downsample_factors: ${factors}
    channels: ${encoder_channels}
    encoder_depth: ${encoder_depth}
    quantizer_type: ${quantizer_type}
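
# Weights & Biases logging; WANDB_PROJECT and WANDB_ENTITY must be set in the
# environment, since ${oc.env:...} has no default here.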
loggers:
  wandb:
    _target_: pytorch_lightning.loggers.wandb.WandbLogger
    project: ${oc.env:WANDB_PROJECT}
    entity: ${oc.env:WANDB_ENTITY}
    # offline: False  # set True to store all logs only locally
    job_type: "train"
    group: ""
    save_dir: ${logs_dir}
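
# Effective batch size is 24 * 2 = 48 crops per optimizer step (accumulate_grad_batches);
# validation runs every 1000 training batches (val_check_interval = ${log_every_n_steps})
# rather than per epoch, since check_val_every_n_epoch is null.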
trainer:
  _target_: pytorch_lightning.Trainer
  gpus: 0             # set `1` to train on GPU, `0` to train on CPU only, and `-1` to train on all GPUs, default `0`
  precision: 32       # precision used for tensors, default `32`
  accelerator: null   # with `ddp`, GPUs train individually and sync gradients; default `None`
  min_epochs: 0
  max_epochs: -1
  enable_model_summary: False
  log_every_n_steps: 1  # logs metrics every N batches
  check_val_every_n_epoch: null
  val_check_interval: ${log_every_n_steps}
  accumulate_grad_batches: 2
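
The file above is a Hydra experiment config (note the `# @package _global_` header and the `_target_`/`${...}` interpolation syntax). A minimal sketch of how such a config is typically consumed with PyTorch Lightning follows; the script layout, the root `config.yaml`, and the `data_dir`/`logs_dir` keys it must supply are assumptions about the surrounding trainer repository, not part of this file.

```python
# Sketch of a Hydra + PyTorch Lightning entry point (assumptions: a root config
# that defines data_dir/logs_dir and pulls this experiment file in via its
# defaults list; the actual trainer repo's script may differ).
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig


@hydra.main(config_path=".", config_name="config", version_base=None)
def train(cfg: DictConfig) -> None:
    # Hydra builds each object from its `_target_` and resolves ${...} interpolations.
    model = hydra.utils.instantiate(cfg.model)
    datamodule = hydra.utils.instantiate(cfg.datamodule)
    callbacks = [hydra.utils.instantiate(c) for c in cfg.callbacks.values()]
    loggers = [hydra.utils.instantiate(lg) for lg in cfg.loggers.values()]
    trainer: pl.Trainer = hydra.utils.instantiate(
        cfg.trainer, callbacks=callbacks, logger=loggers
    )
    trainer.fit(model=model, datamodule=datamodule)


if __name__ == "__main__":
    train()
```

With a standard Hydra config-group layout, this experiment would then be selected from the command line with an override such as `python train.py exp=diffqe_librispeech_5` (again, an assumption about the repo's group names).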