# growth_recursive_defaults.yaml
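# Default hyperparameters for the recursive growth-model example. The layout
# (top-level trainer/optimizer/model sections, classes resolved through
# class_path + init_args) suggests a PyTorch Lightning CLI / jsonargparse
# config — an inference, since the file itself does not say how it is loaded.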
trainer:
  accelerator: cpu
  max_epochs: 1
  min_epochs: 0
  max_time: 00:00:10:00
  precision: 32
  num_sanity_val_steps: 0
  limit_val_batches: 0
  logger:
    class_path: pytorch_lightning.loggers.WandbLogger
    init_args:
      offline: true # set to true to avoid uploading during testing
      log_model: false # set to true to save the model at the end
      name: null # set a name, or leave null to generate one automatically
      project: transversality_examples
      group: null # can group related runs
      tags:
        - basic_example
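  # Three callbacks follow: learning-rate monitoring, checkpointing on
  # train_loss, and EarlyStopping. Per the inline comment on
  # stopping_threshold, the threshold and divergence/finiteness checks, not
  # patience, are typically the binding stopping criteria here.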
  callbacks:
    - class_path: pytorch_lightning.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
        log_momentum: false
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        filename: best
        monitor: train_loss
        verbose: false
        save_last: true
        save_top_k: 1
        save_weights_only: true
        mode: min
        auto_insert_metric_name: true
    - class_path: pytorch_lightning.callbacks.EarlyStopping
      init_args:
        monitor: train_loss
        min_delta: 0.0
        patience: 1000
        mode: min
        check_finite: true
        divergence_threshold: 1000000 # stop if the monitored loss exceeds this
        stopping_threshold: 1.0e-6 # typically the binding stopping criterion
        check_on_train_epoch_end: true
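# LBFGS is a quasi-Newton optimizer that evaluates the full objective on each
# step; it is typically paired with full-batch training (batch_size: 0 below
# presumably means "use the entire grid as one batch" — an assumption, since
# the file does not say). strong_wolfe enables the line search that LBFGS
# needs for stable convergence.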
optimizer:
  class_path: torch.optim.LBFGS
  init_args:
    lr: 1.0
    tolerance_grad: 1.0e-6
    max_iter: 200
    line_search_fn: 'strong_wolfe'
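# Parameters of the growth model itself. Interpreting the standard notation
# (an assumption; the file does not define them): beta = discount factor,
# alpha = capital share, delta = depreciation rate, k_0 = initial capital,
# g = growth rate.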
model:
  beta: 0.9
  alpha: 0.333333333333333
  # Set these to non-empty values for the multiple-steady-states version
  a:
  b_1:
  b_2:
  delta: 0.1
  k_0: 0.4
  batch_size: 0
  shuffle_training: false
  k_sim_grid_points: 16
  z_sim_grid_points: 1
  k_grid_min: 0.8
  k_grid_max: 2.5
  z_grid_min: 1.0
  z_grid_max: 1.0
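  # Note: with z_sim_grid_points: 1 and z_grid_min == z_grid_max == 1.0, the
  # productivity grid collapses to a single point, i.e. the deterministic
  # version of the model.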
  exp_grid_base:
  k_grid_min_2:
  k_grid_max_2:
  val_min_1:
  val_max_1:
  val_min_2:
  val_max_2:
  val_sim_grid_points: 0
  max_T_test: 50 # set to 0 to ignore
  train_grid_test_multiplier: 2.0 # if > 0 this is the number of additional grid points to sample for generating residuals
  g: 0.0
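  # Conventional value function iteration, used per the inline comment below
  # only to produce a reference solution for the comparison metrics.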
  vfi_parameters: # used in the VFI solution for the comparison
    tol: 1.0e-9
    max_iter: 200
    c_solver_tol: 1.0e-7
    k_grid_size: 50
    k_min_multiplier: 0.9
    k_max_multiplier: 1.1
    interpolation_kind: "cubic"
  # Settings for output
  verbose: false
  hpo_objective_name: k_abs_rel_error
  always_log_hpo_objective: false
  print_metrics: false
  save_metrics: false
  save_test_results: false
  test_loss_success_threshold: 1.0e-2 # lax, see k_prime_abs_rel_error
  TVC_test_treshold: 1.999999999999
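  # The neural-network approximator: a 4-layer, 128-unit MLP from econ_layers
  # with Tanh hidden activations and a Softplus head (plausibly to keep the
  # output positive — an assumption). n_in: 2 matches a two-dimensional
  # recursive state, presumably (k, z).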
  ml_model:
    class_path: econ_layers.layers.FlexibleSequential
    init_args:
      n_in: 2
      n_out: 1
      layers: 4
      hidden_dim: 128
      hidden_bias: true
      last_bias: true
      activator:
        class_path: torch.nn.Tanh
      last_activator:
        class_path: torch.nn.Softplus
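
# Example usage (hypothetical: the entry-point script name is not given in
# this file; the flags assume the PyTorch Lightning CLI's section.key
# override syntax):
#   python train_growth_recursive.py --config growth_recursive_defaults.yaml \
#     --trainer.max_epochs 5 --model.beta 0.95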