* support COCO-Stuff 10k/164k
* update docs
* fix docs
* update docs
* fix import lints
* Update docs/dataset_prepare.md (×2)
* Update tools/convert_datasets/coco_stuff164k.py
* Update tools/convert_datasets/coco_stuff10k.py (×4)
* Update coco_stuff.py: fix the description of the dataset
* Update dataset_prepare.md: fix the doc tree of COCO-Stuff 10k
* Update coco_stuff10k.py: fix img_dir
* Update coco_stuff.py: fix descriptions
* Update coco_stuff164k.py: fix out_dir
* Update coco_stuff10k.py: fix save file name
* Update coco_stuff.py: fix seg_map_suffix
* Update dataset_prepare.md: fix -p
* Update dataset_prepare.md: fix doc tree
* modify COCO-Stuff converter
* Remove redundant code
* fix 164k convert bug
* remove redundant comment
* add deeplabv3 configs and more iterations
* replace shutil.move with shutil.copyfile
* Update deeplabv3_r50-d8_512x512_4x4_80k_coco_stuff10k.py: fix wrong config
* Update deeplabv3_r101-d8_512x512_4x4_80k_coco_stuff164k.py: fix wrong config
* fix wrong configs (×2)
* fix wrong path for COCO-Stuff 10k
* fix convert bugs
* fix seg_filename bug
* when nproc=0, use track progress
* rename configs: coco_stuff --> coco-stuff
* add COCO-Stuff 10k and 164k to README.md
* update configs
* add deeplabv3 benchmark
* add pspnet benchmark
* remove redundant comma

Co-authored-by: Junjun2016 <hejunjun@sjtu.edu.cn>
1 parent (4e6f2eb), commit 62f70eb. Showing 34 changed files with 1,328 additions and 1 deletion.
New file (57 lines): dataset settings for COCO-Stuff 10k.
# dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/coco_stuff10k'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=True,
        img_dir='images/train2014',
        ann_dir='annotations/train2014',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=True,
        img_dir='images/test2014',
        ann_dir='annotations/test2014',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=True,
        img_dir='images/test2014',
        ann_dir='annotations/test2014',
        pipeline=test_pipeline))
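
The commit message mentions configs such as deeplabv3_r50-d8_512x512_4x4_80k_coco_stuff10k.py that build on these dataset settings. Below is a minimal sketch of how such a config would compose this file through MMSegmentation's _base_ inheritance; the _base_ paths and the 171-class COCO-Stuff label count are assumptions, since neither appears in this diff.

# Hypothetical model config composing the dataset settings above (sketch only).
_base_ = [
    '../_base_/models/deeplabv3_r50-d8.py',  # assumed model base config
    '../_base_/datasets/coco-stuff10k.py',   # assumed path of the dataset file above
    '../_base_/default_runtime.py',          # assumed runtime base config
    '../_base_/schedules/schedule_80k.py',   # assumed 80k-iteration schedule
]
# COCO-Stuff is commonly trained with 171 classes in MMSegmentation, so both
# heads override the default number of classes.
model = dict(
    decode_head=dict(num_classes=171),
    auxiliary_head=dict(num_classes=171))

Note that reduce_zero_label=True appears both in LoadAnnotations and in the 10k dataset dicts, while the 164k settings below omit it, presumably because the coco_stuff164k.py conversion script referenced in the commit message already maps the unlabeled class to the ignore index.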
New file (54 lines): dataset settings for COCO-Stuff 164k.
# dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/coco_stuff164k'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/train2017',
        ann_dir='annotations/train2017',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/val2017',
        ann_dir='annotations/val2017',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/val2017',
        ann_dir='annotations/val2017',
        pipeline=test_pipeline))
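
Because MMCV's Config merges nested dictionaries across _base_ files, a downstream config only needs to restate the keys it changes. A minimal, hypothetical override of the batch size on top of one of the 164k configs might look like the sketch below; the file name in _base_ is an assumption.

# Hypothetical user config: keep everything from the base 164k config but
# halve the per-GPU batch size; unspecified keys (train/val/test dicts,
# pipelines, etc.) are inherited unchanged through the dict merge.
_base_ = './deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py'  # assumed file name
data = dict(samples_per_gpu=2, workers_per_gpu=2)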
New file (9 lines): a 320k-iteration training schedule.
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=320000)
checkpoint_config = dict(by_epoch=False, interval=32000)
evaluation = dict(interval=32000, metric='mIoU')
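
The schedule pairs SGD with the 'poly' learning-rate policy over 320k iterations. Here is a minimal sketch of how that policy decays the rate from lr=0.01 toward min_lr=1e-4, assuming the standard formula used by MMCV's poly LR updater.

# Sketch of the 'poly' policy configured above:
#   lr(i) = (base_lr - min_lr) * (1 - i / max_iters) ** power + min_lr
def poly_lr(i, base_lr=0.01, min_lr=1e-4, power=0.9, max_iters=320000):
    return (base_lr - min_lr) * (1 - i / max_iters) ** power + min_lr

for i in (0, 160000, 320000):
    print(i, round(poly_lr(i), 6))
# 0       0.01      (full base learning rate at the start)
# 160000  0.005405  (roughly half the base rate at the midpoint)
# 320000  0.0001    (floors at min_lr on the final iteration)

With checkpoint_config and evaluation both set to an interval of 32,000 iterations, checkpoints and mIoU evaluations run ten times over the full 320k-iteration schedule.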