-
Notifications
You must be signed in to change notification settings - Fork 4
/
test.py
executable file
·47 lines (33 loc) · 1.1 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import logging
import os
import hydra
from hydra.utils import instantiate
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.plugins import DDPPlugin
import torch
from data.data_module import DataModule
from finetune_learner import Learner
# static vars
# Silence wandb's console output and stop the lightning logger from
# propagating records to the root logger (avoids duplicate log lines).
os.environ["WANDB_SILENT"] = "true"
logging.getLogger("lightning").propagate = False
@hydra.main(config_path="conf", config_name="config_test")
def main(cfg):
    """Run model evaluation for the hydra config ``conf/config_test``.

    Seeds RNGs if requested, records the SLURM job id and GPU count on the
    config, optionally instantiates a wandb logger, then runs
    ``Trainer.test`` on the project's ``Learner`` with its ``DataModule``.
    """
    if cfg.fix_seed:
        seed_everything(42, workers=True)

    # Fix: os.environ["SLURM_JOB_ID"] raised KeyError when run outside a
    # SLURM allocation (e.g. local debugging); fall back gracefully.
    slurm_job_id = os.environ.get("SLURM_JOB_ID", "unknown")
    print(f"The SLURM job ID for this run is {slurm_job_id}")
    cfg.slurm_job_id = slurm_job_id

    cfg.gpus = torch.cuda.device_count()
    print(f"num gpus: {cfg.gpus}")

    wandb_logger = instantiate(cfg.logger) if cfg.log_wandb else None

    data_module = DataModule(cfg)
    learner = Learner(cfg)
    trainer = Trainer(
        **cfg.trainer,
        logger=wandb_logger,
        # DDP only makes sense with more than one GPU; single-device runs
        # use the default (None) strategy.
        strategy=DDPPlugin(find_unused_parameters=False) if cfg.gpus > 1 else None,
    )
    trainer.test(learner, datamodule=data_module)
# Script entry point; hydra parses CLI overrides and injects the config.
if __name__ == "__main__":
    main()