train_custom.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Training script for a custom COCO-format dataset.

To adapt it, change the image directories and annotation paths below,
and define your own category names.
"""
import os

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from detectron2.data import MetadataCatalog, build_detection_train_loader, DatasetCatalog
from detectron2.data.datasets.coco import load_coco_json
from detectron2.data.dataset_mapper import DatasetMapper

from yolov7.config import add_yolo_config
from yolov7.data.dataset_mapper import MyDatasetMapper

# Dataset config: edit the class names and paths below for your own data.
CLASS_NAMES = ["face", "face_mask"]
DATASET_ROOT = './datasets/facemask'
ANN_ROOT = os.path.join(DATASET_ROOT, 'annotations')
TRAIN_PATH = os.path.join(DATASET_ROOT, 'train')
VAL_PATH = os.path.join(DATASET_ROOT, 'val')
TRAIN_JSON = os.path.join(ANN_ROOT, 'instances_train2017.json')
VAL_JSON = os.path.join(ANN_ROOT, 'instances_val2017.json')

PREDEFINED_SPLITS_DATASET = {
    "facemask_train": (TRAIN_PATH, TRAIN_JSON),
    "facemask_val": (VAL_PATH, VAL_JSON),
}
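
# For reference, the paths above assume a directory layout like the following
# (the facemask dataset is not part of this repo; adjust to wherever your data lives):
#
#   datasets/facemask/
#       annotations/
#           instances_train2017.json   # COCO-format annotations
#           instances_val2017.json
#       train/    # training images
#       val/      # validation images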


def plain_register_dataset():
    for k, v in PREDEFINED_SPLITS_DATASET.items():
        # Bind v as a default argument so each lambda keeps its own (image_root, json_file)
        # pair; a bare `lambda: load_coco_json(v[1], v[0])` would late-bind to the last v.
        DatasetCatalog.register(
            k, lambda v=v: load_coco_json(v[1], v[0]))
        MetadataCatalog.get(k).set(thing_classes=CLASS_NAMES,
                                   evaluator_type='coco',
                                   json_file=v[1],
                                   image_root=v[0])


plain_register_dataset()
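
# Optional sanity check (illustrative): after registration the metadata should
# reflect the configured classes, e.g.
#   assert MetadataCatalog.get("facemask_train").thing_classes == CLASS_NAMES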


class Trainer(DefaultTrainer):

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, output_dir=output_folder)

    @classmethod
    def build_train_loader(cls, cfg):
        # the default loader would be: build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
        # use our own dataset mapper instead to add more augmentations
        return build_detection_train_loader(cfg, mapper=MyDatasetMapper(cfg, True))


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_yolo_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def main(args):
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
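
# Example invocation (the config path is illustrative; use whichever YAML under the
# repo's configs/ directory matches your model). The flags themselves come from
# detectron2's default_argument_parser:
#
#   python train_custom.py --config-file configs/coco/yolov7.yaml --num-gpus 1
#
# and for evaluation only, reusing the weights written to OUTPUT_DIR:
#
#   python train_custom.py --config-file configs/coco/yolov7.yaml \
#       --eval-only MODEL.WEIGHTS output/model_final.pth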