
Fix AP score in Jupyter notebooks
Before the fix, the computed AP score is slightly
lower than it should be. It doesn't affect
training; it just shows slightly lower AP results
in the notebooks.
waleedka committed Apr 8, 2018
1 parent 48a25fd commit 445bcb3
Showing 1 changed file with 4 additions and 4 deletions.
mrcnn/utils.py: 4 additions & 4 deletions
@@ -662,8 +662,8 @@ def compute_matches(gt_boxes, gt_class_ids, gt_masks,

     # Loop through predictions and find matching ground truth boxes
     match_count = 0
-    pred_match = np.zeros([pred_boxes.shape[0]])
-    gt_match = np.zeros([gt_boxes.shape[0]])
+    pred_match = -1 * np.ones([pred_boxes.shape[0]])
+    gt_match = -1 * np.ones([gt_boxes.shape[0]])
     for i in range(len(pred_boxes)):
         # Find best matching ground truth box
         # 1. Sort matches by score
@@ -709,8 +709,8 @@ def compute_ap(gt_boxes, gt_class_ids, gt_masks,
         iou_threshold)

     # Compute precision and recall at each prediction box step
-    precisions = np.cumsum(pred_match > 0) / (np.arange(len(pred_match)) + 1)
-    recalls = np.cumsum(pred_match > 0).astype(np.float32) / len(gt_match)
+    precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
+    recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)

     # Pad with start and end values to simplify the math
     precisions = np.concatenate([[0], precisions, [0]])
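Why the change matters: in compute_matches, pred_match[i] stores the index of the ground-truth box that prediction i matched (and gt_match[j] the index of the matching prediction). With a zero initialization, a prediction matched to ground-truth box 0 is indistinguishable from an unmatched prediction, so the pred_match > 0 test in compute_ap silently dropped it. Initializing with -1 as an explicit "unmatched" sentinel and testing pred_match > -1 counts every match. Below is a minimal sketch (not from the repository; the array values are hypothetical) that reproduces the off-by-one:

import numpy as np

# Hypothetical outcome for 3 predictions and 2 ground-truth boxes:
# prediction 0 matched GT box 0, prediction 1 matched nothing,
# prediction 2 matched GT box 1. pred_match stores the matched GT index.
old = np.zeros(3)        # old init: "unmatched" and "matched GT 0" both look like 0
old[2] = 1               # only the match to GT box 1 is recorded distinctly
new = -1 * np.ones(3)    # fixed init: -1 is an unambiguous "unmatched" sentinel
new[0], new[2] = 0, 1    # both matches are recorded

num_gt = 2
for label, pred_match, unmatched in [("before", old, 0), ("after", new, -1)]:
    matched = pred_match > unmatched
    precisions = np.cumsum(matched) / (np.arange(len(pred_match)) + 1)
    recalls = np.cumsum(matched) / num_gt
    print(label, precisions, recalls)
# before: precisions [0.    0.    0.333], recalls [0.  0.  0.5]  <- match to GT 0 lost
# after:  precisions [1.    0.5   0.667], recalls [0.5 0.5 1. ]  <- both matches counted

Only matches to ground-truth box 0 were affected (at most one per image), which is consistent with the "slightly lower" AP described in the commit message and with training being unaffected.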
