add an argument format_only to handle the json formatting (open-mmlab#…
hellock authored Feb 19, 2020
1 parent 535f281 commit 5226fc4
Showing 3 changed files with 50 additions and 15 deletions.
42 changes: 31 additions & 11 deletions mmdet/datasets/coco.py
@@ -260,6 +260,33 @@ def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
ar = recalls.mean(axis=1)
return ar

def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list): Testing results of the dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp directory will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing
the json filepaths, tmp_dir is the temporary directory created
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))

if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
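For reference, the new method can be exercised directly once raw detection results are available. A minimal sketch, where dataset is a CocoDataset instance and outputs is a list with one result per image (both placeholders, e.g. as produced by single_gpu_test):

# tmp_dir is only a TemporaryDirectory when no jsonfile_prefix is given,
# so with an explicit prefix there is nothing to clean up afterwards.
result_files, tmp_dir = dataset.format_results(
    outputs, jsonfile_prefix='work_dirs/demo/results')
print(result_files)  # dict of json file paths, keyed by result type (e.g. 'bbox')
if tmp_dir is not None:
    tmp_dir.cleanup()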

def evaluate(self,
results,
metric='bbox',
@@ -275,7 +302,9 @@ def evaluate(self,
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None):
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp directory will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
@@ -287,23 +316,14 @@
Returns:
dict[str: float]
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))

metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError('metric {} is not supported'.format(metric))

if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

eval_results = {}
cocoGt = self.coco
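With this refactor, evaluate obtains its json files through format_results, so jsonfile_prefix keeps its previous meaning. A hypothetical call, reusing the placeholder dataset and outputs from the sketch above:

# Compute COCO bbox metrics; the formatted json files end up under the prefix.
eval_results = dataset.evaluate(
    outputs, metric='bbox', jsonfile_prefix='work_dirs/demo/results')
print(eval_results)  # dict[str: float], per the docstring above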
3 changes: 3 additions & 0 deletions mmdet/datasets/custom.py
@@ -152,6 +152,9 @@ def prepare_test_img(self, idx):
self.pre_pipeline(results)
return self.pipeline(results)

def format_results(self, results, **kwargs):
pass

def evaluate(self,
results,
metric='mAP',
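The no-op stub on CustomDataset means subclasses that target an external submission format are expected to override format_results themselves, as CocoDataset does above. A purely illustrative override (MyDataset and its json layout are hypothetical, not part of this commit):

import mmcv
from mmdet.datasets import CustomDataset


class MyDataset(CustomDataset):

    def format_results(self, results, jsonfile_prefix='my_results', **kwargs):
        # Placeholder conversion: a real dataset would turn results into the
        # exact json structure its test server expects.
        json_results = [dict(image_id=idx) for idx in range(len(results))]
        out_file = jsonfile_prefix + '.json'
        mmcv.dump(json_results, out_file)
        return out_file, None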
20 changes: 16 additions & 4 deletions tools/test.py
@@ -203,6 +203,12 @@ def parse_args():
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format_only',
action='store_true',
help='Format the output results without performing evaluation. It is '
'useful when you want to format the results to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
@@ -235,9 +241,13 @@ def parse_args():
def main():
args = parse_args()

assert args.out or args.eval or args.show, \
('Please specify at least one operation (save or eval or show the '
'results) with the argument "--out", "--eval" or "--show"')
assert args.out or args.eval or args.format_only or args.show, \
('Please specify at least one operation (save/eval/format/show the '
'results) with the argument "--out", "--eval", "--format_only" '
'or "--show"')

if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot both be specified')

if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
@@ -295,8 +305,10 @@ def main():
if args.out:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
kwargs = {} if args.options is None else args.options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
kwargs = {} if args.options is None else args.options
dataset.evaluate(outputs, args.eval, **kwargs)


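Putting the pieces together, formatting results for submission without running evaluation might look like the following, assuming --options accepts key=value pairs that are forwarded to format_results as keyword arguments (the config, checkpoint and prefix paths are placeholders):

python tools/test.py configs/some_config.py checkpoints/some_ckpt.pth \
    --format_only --options jsonfile_prefix=work_dirs/demo/results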
