script_turbulence.py
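# Train a latent-ODE reduced-order model (NodeROM) on the 2D decaying
# turbulence dataset, using either the physics-informed PhiROMTrainer or the
# purely data-driven DINo baseline (selected with --dino).
#
# Example invocation (defaults shown; the dataset file is expected at
# data/<dataset>.h5):
#   python script_turbulence.py --latent_dim 100 --epochs 20000 \
#       --dataset "turbulence_64x64_ins=5" --decoder_arch hyper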
import argparse
import os
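# Set before importing equinox: with EQX_ON_ERROR="nan", equinox runtime
# errors (e.g. an ODE solve exceeding max_steps) propagate as NaNs instead of
# raising, so a diverging trajectory does not abort a long training run.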
os.environ["EQX_ON_ERROR"] = "nan"
from pathlib import Path
import equinox as eqx
import jax
import numpy as np
import optax as optx
from torch.utils.data import DataLoader
from PHIROM.modules.models import DecoderArchEnum, NodeROM
from PHIROM.pde.navier_turbulence import *
from PHIROM.training.baseline import DINOTrainer
from PHIROM.training.callbacks import (
CheckpointCallback,
NODEUnrollingEvaluationCallback,
)
from PHIROM.training.train import NodeTrainingModeEnum, PhiROMTrainer
from PHIROM.utils.experiment_utils import *
from PHIROM.utils.serial import save_model
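# --- Command-line configuration ---------------------------------------------
# Boolean flags use argparse.BooleanOptionalAction, so each one can also be
# negated with its --no-* form (e.g. --no-normalize, --no-autodecoder).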
parser = argparse.ArgumentParser()
parser.add_argument("--latent_dim", type=int, default=100)
parser.add_argument("--width", type=int, default=64)
parser.add_argument("--activation", type=str, default="sin")
parser.add_argument("--node_activation", type=str, default="swish")
parser.add_argument("--node_width", type=int, default=64)
parser.add_argument("--epochs", type=int, default=20000)
parser.add_argument("--dataset", type=str, default="turbulence_64x64_ins=5")
parser.add_argument("--prefix", type=str, default="")
parser.add_argument("--seed", type=int, default=101)
parser.add_argument("--loss", type=str, default="nmse")
parser.add_argument(
"--ode_solver", type=str, default="bosh3", choices=["bosh3", "dopri5", "euler"]
)
parser.add_argument("--adaptive", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--max_ode_steps", type=int, default=None)
parser.add_argument(
"--dino",
action=argparse.BooleanOptionalAction,
default=False,
help="Train Data-Driven only (DINo)",
)
parser.add_argument("--gamma", type=float, default=1.0)
parser.add_argument("--gamma_decay_rate", type=float, default=0.99)
parser.add_argument(
"--gamma_epochs", type=int, default=10, help="Scheduling gamma decay epochs"
)
parser.add_argument("--final_gamma", type=float, default=0.0)
parser.add_argument("--final_lr", type=float, default=1e-6)
parser.add_argument("--decay_steps", type=int, default=50)
parser.add_argument("--decay_rate", type=float, default=0.985)
parser.add_argument("--num_samples", type=int, default=10)
parser.add_argument(
"--autodecoder",
action=argparse.BooleanOptionalAction,
default=True,
help="Use autodecoder",
)
parser.add_argument("--max_step", type=int, default=20)
parser.add_argument("--evolve_start", type=int, default=0)
parser.add_argument(
"--decoder_arch", type=str, default="hyper", choices=["mlp", "hyper"]
)
parser.add_argument(
"--node_arch", type=str, default="mlp", choices=["mlp", "hyper_concat"]
)
parser.add_argument(
"--node_training_mode",
type=str,
default=str(NodeTrainingModeEnum.JACOBIAN_INVERSE),
    choices=[
        str(NodeTrainingModeEnum.JACOBIAN_INVERSE),
        str(NodeTrainingModeEnum.JACOBIAN_PSI),
        str(NodeTrainingModeEnum.ZERO),
        str(NodeTrainingModeEnum.LABELS),
    ],
)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--learning_rate_decoder", type=float, default=1e-2)
parser.add_argument("--learning_rate_node", type=float, default=-1)
parser.add_argument("--learning_rate_latent", type=float, default=-1)
parser.add_argument("--normalize", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--pinn", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--loss_lambda", type=float, default=0.8)
args = parser.parse_args()
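# Unpack the parsed arguments into the local names used below.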
latent_dim = args.latent_dim
width = args.width
activation = args.activation
node_activation = args.node_activation
epochs = args.epochs
dataset_name = args.dataset
prefix = args.prefix
seed = args.seed
loss = args.loss
DINO = args.dino
final_lr = args.final_lr
decay_steps = args.decay_steps
decay_rate = args.decay_rate
gamma = args.gamma
gamma_decay_rate = args.gamma_decay_rate
gamma_epochs = args.gamma_epochs
final_gamma = args.final_gamma
num_samples = args.num_samples
autodecoder = args.autodecoder
max_step = args.max_step
evolve_start = args.evolve_start
arch = args.decoder_arch
ode_solver = args.ode_solver
adaptive = args.adaptive
max_ode_steps = args.max_ode_steps
node_width = args.node_width
node_arch = args.node_arch
batch_size = args.batch_size
learning_rate_decoder = args.learning_rate_decoder
learning_rate_node = args.learning_rate_node
learning_rate_latent = args.learning_rate_latent
normalize = args.normalize
node_training_mode = args.node_training_mode
loss_lambda = args.loss_lambda
paramed = False  # decaying turbulence here carries no varying PDE parameter
data_path = f"data/{dataset_name}.h5"
param_dim = 1 if paramed else 0
if node_arch in ("hyper", "hyperV2"):
    raise ValueError("Hyper node not supported for non-parametric datasets")
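# Datasets: training serves individual snapshots for PhiROM or full
# trajectories for DINo; the validation set (indices 512..527, disjoint from
# the training range) and a 16-trajectory training subset are unrolled to
# twice the training horizon by the evaluation callbacks below.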
if autodecoder:
    if not DINO:
        dataset_train = DecayingTurbulenceDatasetTorch(
            data_path, max_step, indices=(0, num_samples), paramed=paramed
        )
    else:
        dataset_train = DecayingTurbulenceTrajDatasetTorch(
            data_path, max_step, indices=(0, num_samples), paramed=paramed
        )
    dataset_validation = DecayingTurbulenceTrajDatasetTorch(
        data_path, max_step * 2, indices=(512, 512 + 16), paramed=paramed
    )
    subdataset_train = DecayingTurbulenceTrajDatasetTorch(
        data_path, max_step * 2, indices=(0, 16), paramed=paramed
    )
else:
raise NotImplementedError("AE Not implemented")
loader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
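# decay_steps is given in epochs on the command line; convert it to optimizer
# steps (one step per batch).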
decay_steps = len(loader_train) * decay_steps
print(
f"Training on {dataset_name} dataset - num batches: {len(loader_train)} - num samples: {num_samples} - max step: {max_step}"
)
print(f"Decay every {decay_steps} steps with rate {decay_rate}")
if paramed:
MEAN_NODE_ARGS = dataset_train.node_args.mean(axis=0).numpy()
STD_NODE_ARGS = dataset_train.node_args.std(axis=0).numpy()
else:
MEAN_NODE_ARGS = None
STD_NODE_ARGS = None
path, name = get_path_and_name(args)
MEAN, STD = dataset_train.compute_mean_std_fields()
nx = dataset_train.x.shape[0]
ny = dataset_train.y.shape[0]
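# Model hyperparameters: field values (and, for the hyper decoder, the input
# coordinates) are normalized with dataset statistics, and the latent-ODE
# ("node") network is configured alongside the decoder.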
hyperparams = {
"latent_dim": latent_dim,
"num_sensors": nx * ny,
"field_dim": 2,
"spatial_dim": 2,
"mean_field": MEAN if normalize else None,
"std_field": STD if normalize else None,
"activation": activation,
"node_kwargs": {
"node_arch": node_arch,
"activation": node_activation,
"depth": 4,
"width": node_width,
"param_size": param_dim,
"solver": ode_solver,
"adaptive": adaptive,
"max_steps": max_ode_steps,
"mean_params": MEAN_NODE_ARGS,
"std_params": STD_NODE_ARGS,
},
}
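# Decoder architecture: a plain MLP or a hypernetwork decoder. Coordinate
# inputs are standardized (mean/std) for saturating activations, or rescaled
# to their min/max range for the sinusoidal activation.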
if arch == "mlp":
arch = DecoderArchEnum.MLP
hyperparams["width_scale"] = width
hyperparams["decoder_arch"] = arch
elif arch == "hyper":
arch = DecoderArchEnum.HYPER
hyperparams["decoder_arch"] = arch
hyperparams["width"] = width
hyperparams["n_layers"] = 3
hyperparams["std_coords"] = True
hyperparams["input_scale"] = 1.0
if activation in ["tanh", "relu", "swish", "elu", "softplus"]:
mean_x, std_x = dataset_train.compute_mean_std_coords()
hyperparams["mean_x"] = mean_x
hyperparams["std_x"] = std_x
elif activation == "sin":
min_x, max_x = dataset_train.compute_min_max_coords()
    print(f"Coordinate bounds: min={min_x}, max={max_x}")
hyperparams["min_x"] = min_x
hyperparams["max_x"] = max_x
key = jax.random.PRNGKey(seed)
key, subkey = jax.random.split(key)
model, model_state = eqx.nn.make_with_state(NodeROM)(**hyperparams, key=subkey)
path_experiment = os.path.join("NODE_experiments", path, name)
path_checkpoint = os.path.join(path_experiment, "checkpoints")
Path(path_experiment).mkdir(parents=True, exist_ok=True)
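# Callbacks: periodic checkpointing plus unrolling evaluations every 1000
# epochs, integrating the latent ODE to twice the training horizon on both
# held-out validation trajectories and a subset of the training set.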
callbacks = [
CheckpointCallback(path_checkpoint, name, hyperparams, True, 1000),
NODEUnrollingEvaluationCallback(
dataset_validation,
max_step,
max_step * 2,
1000,
plot_results=False,
plot_dir=path_experiment,
dict_key_prefix="validation_unrolling",
batch_size=4,
),
NODEUnrollingEvaluationCallback(
subdataset_train,
max_step,
max_step * 2,
1000,
plot_results=True,
plot_dir=path_experiment,
dict_key_prefix="train_unrolling",
batch_size=4,
),
# LatentMemoryResetCallback(loader_train, 500, optax.adamw(1e-1), 1000)
]
key, subkey = jax.random.split(key)
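# Physics residual for the time-stepping loss: built from the CFD grid when
# training physics-informed, and skipped for the data-driven baseline (DINo)
# and for the ZERO training mode.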
if not DINO and node_training_mode != NodeTrainingModeEnum.ZERO:
evolve_fn = cfd_residual_builder(dataset_train.grid)
else:
evolve_fn = None
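# Two training paths: the DINo baseline (Adam, with separate optimizers for
# decoder, latent ODE, and latent codes) or PhiROM (AdamW, with the NODE and
# latent optimizers enabled only when their learning rates are positive).
# Every schedule decays exponentially toward final_lr unless decay_rate >= 1.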
if DINO:
scheduler = (
optx.schedules.exponential_decay(
learning_rate_decoder,
decay_steps,
decay_rate,
end_value=final_lr,
staircase=True,
)
if decay_rate < 1.0
else learning_rate_decoder
)
optimizer = optx.adam(scheduler)
scheduler_node = (
optx.schedules.exponential_decay(
learning_rate_node,
decay_steps,
decay_rate,
end_value=final_lr,
staircase=True,
)
if decay_rate < 1.0
else learning_rate_node
)
optimizer_node = optx.adam(scheduler_node)
scheduler_latent = (
optx.schedules.exponential_decay(
learning_rate_latent,
decay_steps,
decay_rate,
end_value=final_lr,
staircase=True,
)
if decay_rate < 1.0
else learning_rate_latent
)
assert (
learning_rate_latent > 0
), "Learning rate for latent variable must be positive"
optimizer_latent = optx.adam(scheduler_latent)
trainer = DINOTrainer(
model=model,
model_state=model_state,
optimizer=optimizer,
optimizer_node=optimizer_node,
optimizer_latent=optimizer_latent,
loss=loss,
evolve_fn=evolve_fn,
evolve_start=evolve_start,
num_trajectories=num_samples,
num_time_steps=max_step,
latent_dim=latent_dim,
callbacks=callbacks,
gamma=gamma,
gamma_decay_rate=gamma_decay_rate,
gamma_decay_epochs=gamma_epochs,
final_gamma=final_gamma,
key=subkey,
)
else:
scheduler = (
optx.schedules.exponential_decay(
learning_rate_decoder,
decay_steps,
decay_rate,
end_value=final_lr,
staircase=True,
)
if decay_rate < 1.0
else learning_rate_decoder
)
print(scheduler)
optimizer = optx.adamw(scheduler)
if learning_rate_node > 0:
scheduler_node = (
optx.schedules.exponential_decay(
learning_rate_node,
decay_steps,
decay_rate,
end_value=final_lr,
staircase=True,
)
if decay_rate < 1.0
else learning_rate_node
)
optimizer_node = optx.adamw(scheduler_node)
else:
optimizer_node = None
if learning_rate_latent > 0:
scheduler_latent = (
optx.schedules.exponential_decay(
learning_rate_latent,
decay_steps,
decay_rate,
end_value=final_lr,
staircase=True,
)
if decay_rate < 1.0
else learning_rate_latent
)
optimizer_latent = optx.adamw(scheduler_latent)
else:
optimizer_latent = None
trainer = PhiROMTrainer(
model=model,
model_state=model_state,
optimizer=optimizer,
optimizer_node=optimizer_node,
optimizer_latent=optimizer_latent,
node_training_mode=node_training_mode,
loss=loss,
evolve_fn=evolve_fn,
evolve_start=evolve_start,
num_trajectories=num_samples,
num_time_steps=max_step,
latent_dim=latent_dim,
callbacks=callbacks,
gamma=gamma,
key=subkey,
loss_lambda=loss_lambda,
)
model, model_state, opt_state, history = trainer.fit(
loader_train, epochs=epochs, warm_start=True
)
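# Persist artifacts: model weights with their hyperparameters, the training
# loss history, and (in autodecoder mode) the learned latent codes.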
save_model(os.path.join(path_experiment, "model.eqx"), hyperparams, model, model_state)
history["loss_reconstruction"] = np.array(history["loss_reconstruction"])
history["loss_time_stepping"] = np.array(history["loss_time_stepping"])
np.savez(os.path.join(path_experiment, "history.npz"), **history)
if autodecoder:
    latents = np.array(trainer.latent_memory)
    np.save(os.path.join(path_experiment, "latent_memory.npy"), latents)