-
Notifications
You must be signed in to change notification settings - Fork 4
/
config_template.yaml
45 lines (39 loc) · 1.65 KB
/
config_template.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
experiment:  # Name of the experiment
  name: "experiment_name"  # This will be used to save the best model weights
  base_path: "/absolute/path/to/the/model/code"

data_generation:
  train_dataset_file: "/path/to/the/training/dataset"  # training / validation set
  valid_dataset_file: "/path/to/the/validation/dataset"
  benchmark_dataset_file: "/path/to/the/benchmarks/dataset"
  batch_size: 2048
  nb_processes: 4  # Number of processes to use when loading the data in parallel
  min_functions_per_tree_footprint: 2  # Minimum number of functions accepted in a batch. Set to 0 if you are using a small data sample

training:
  log_file: "logs.txt"  # Just the name
  lr: 0.001
  max_epochs: 1000
  training_gpu: "cuda:x"  # GPU to train on. Example: cuda:2
  validation_gpu: "cpu"  # GPU to validate on. Usually the CPU is enough
  continue_training: false  # Continue training from saved model checkpoint
  model_weights_path: "/path/to/model/weights"  # Model weights to use for finetuning

testing:
  testing_model_weights_path: "/path/to/model/weights"  # Model weights to evaluate
  gpu: "cuda:x"  # GPU to validate on

wandb:
  use_wandb: false  # Track model progress using the Weights & Biases platform
  project: "release_model"  # Name of the project to add this experiment to

model:
  input_size: 846  # Size of the input. Here we specify the size of the computation vector.
  comp_embed_layer_sizes:
    - 600
    - 350
    - 200
    - 180
  drops:  # Dropout layers probabilities
    - 0.050
    - 0.050
    - 0.050
    - 0.050
    - 0.050

# Hydra composition defaults — disables Hydra's own job logging so the
# training code's log_file (above) is the sole log output.
defaults:
  - override hydra/job_logging: disabled