Commit 4db085e

Adding the 11-point interpolation method and images

1 parent a9bb67c

5 files changed: +151 −12 lines

aux_images/11-pointInterpolation.png (binary image, 28.3 KB)
lib/Evaluator.py

Lines changed: 131 additions & 9 deletions
```diff
@@ -8,24 +8,33 @@
 # Last modification: May 24th 2018                                                      #
 ###########################################################################################
 
+import sys
+from collections import Counter
+
+import matplotlib.pyplot as plt
+import numpy as np
+
 from BoundingBox import *
 from BoundingBoxes import *
-import matplotlib.pyplot as plt
-from collections import Counter
 from utils import *
-import numpy as np
-import sys
 
 
 class Evaluator:
-    def GetPascalVOCMetrics(self, boundingboxes, IOUThreshold=0.5):
+    def GetPascalVOCMetrics(self,
+                            boundingboxes,
+                            IOUThreshold=0.5,
+                            method=MethodAveragePrecision.EveryPointInterpolation):
         """Get the metrics used by the VOC Pascal 2012 challenge.
         Get
         Args:
             boundingboxes: Object of the class BoundingBoxes representing ground truth and detected
             bounding boxes;
             IOUThreshold: IOU threshold indicating which detections will be considered TP or FP
-            (default value = 0.5).
+            (default value = 0.5);
+            method (default = EveryPointInterpolation): it can be calculated as the implementation
+            in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point
+            interpolation as described in the paper "The PASCAL Visual Object Classes (VOC)
+            Challenge" (ElevenPointInterpolation);
         Returns:
             A list of dictionaries. Each dictionary contains information and metrics of each class.
             The keys of each dictionary are:
```
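For orientation, a minimal calling sketch of the extended signature. This is hedged: `boundingboxes` is an assumed, already-populated `BoundingBoxes` object built as in the repository's samples, and the printed keys come from the docstring above.

```python
from Evaluator import Evaluator
from utils import MethodAveragePrecision

evaluator = Evaluator()
# `boundingboxes` is an assumed, already-populated BoundingBoxes object
# holding ground-truth and detected boxes for every image.
metrics = evaluator.GetPascalVOCMetrics(
    boundingboxes,
    IOUThreshold=0.5,
    method=MethodAveragePrecision.ElevenPointInterpolation)
for m in metrics:
    print(m['class'], m['AP'])
```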
```diff
@@ -112,7 +121,11 @@ def GetPascalVOCMetrics(self, boundingboxes, IOUThreshold=0.5):
             acc_TP = np.cumsum(TP)
             rec = acc_TP / npos
             prec = np.divide(acc_TP, (acc_FP + acc_TP))
-            [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)
+            # Depending on the method, call the right implementation
+            if method == MethodAveragePrecision.EveryPointInterpolation:
+                [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)
+            else:
+                [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)
             # add class result in the dictionary to be returned
             r = {
                 'class': c,
```
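To make the cumulative computation feeding this dispatch concrete, here is a tiny self-contained sketch with made-up TP/FP flags for five detections already sorted by confidence (illustrative numbers only, not taken from the repository):

```python
import numpy as np

TP = np.array([1, 0, 1, 1, 0])     # 1 = true positive, 0 = false positive
FP = 1 - TP
npos = 4                           # assumed number of ground-truth boxes

acc_TP = np.cumsum(TP)             # [1, 1, 2, 3, 3]
acc_FP = np.cumsum(FP)             # [0, 1, 1, 1, 2]
rec = acc_TP / npos                # [0.25, 0.25, 0.50, 0.75, 0.75]
prec = acc_TP / (acc_TP + acc_FP)  # [1.00, 0.50, 0.67, 0.75, 0.60]
```

Either interpolation method then reduces these `rec`/`prec` arrays to a single AP value.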
```diff
@@ -132,6 +145,7 @@ def PlotPrecisionRecallCurve(self,
                              classId,
                              boundingBoxes,
                              IOUThreshold=0.5,
+                             method=MethodAveragePrecision.EveryPointInterpolation,
                              showAP=False,
                              showInterpolatedPrecision=False,
                              savePath=None,
```
```diff
@@ -144,6 +158,10 @@ def PlotPrecisionRecallCurve(self,
             bounding boxes;
             IOUThreshold (optional): IOU threshold indicating which detections will be considered
             TP or FP (default value = 0.5);
+            method (default = EveryPointInterpolation): it can be calculated as the implementation
+            in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point
+            interpolation as described in the paper "The PASCAL Visual Object Classes (VOC)
+            Challenge" (ElevenPointInterpolation);
             showAP (optional): if True, the average precision value will be shown in the title of
             the graph (default = False);
             showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated
```
```diff
@@ -164,7 +182,7 @@ def PlotPrecisionRecallCurve(self,
             dict['total TP']: total number of True Positive detections;
             dict['total FP']: total number of False Positive detections;
         """
-        results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold)
+        results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)
         result = None
        for res in results:
             if res['class'] == classId:
```
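A hedged usage sketch of the plotting call with the new parameter threaded through (argument values are illustrative; `boundingBoxes` is assumed to be populated as in the samples, and `'object'` is the class name used there):

```python
from Evaluator import Evaluator
from utils import MethodAveragePrecision

evaluator = Evaluator()
# Plot the curve for one class using the 11-point interpolation.
evaluator.PlotPrecisionRecallCurve(
    'object',                # classId, as used in the repository's samples
    boundingBoxes,           # assumed populated BoundingBoxes object
    IOUThreshold=0.5,
    method=MethodAveragePrecision.ElevenPointInterpolation,
    showAP=True,
    showInterpolatedPrecision=True)
```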
```diff
@@ -178,10 +196,64 @@ def PlotPrecisionRecallCurve(self,
         average_precision = result['AP']
         mpre = result['interpolated precision']
         mrec = result['interpolated recall']
+        # npos = result['total positives']
+        # total_tp = result['total TP']
+        # total_fp = result['total FP']
+
+        if showInterpolatedPrecision:
+            if method == MethodAveragePrecision.EveryPointInterpolation:
+                plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')
+            elif method == MethodAveragePrecision.ElevenPointInterpolation:
+                # Uncomment the line below if you want to plot the area
+                # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')
+                # Remove duplicates, getting only the highest precision of each recall value
+                nrec = []
+                nprec = []
+                for idx in range(len(mrec)):
+                    r = mrec[idx]
+                    if r not in nrec:
+                        idxEq = np.argwhere(np.array(mrec) == r)
+                        nrec.append(r)
+                        nprec.append(max([mpre[int(id)] for id in idxEq]))
+                plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')
+        plt.plot(recall, precision, label='Precision')
+        plt.xlabel('recall')
+        plt.ylabel('precision')
+        if showAP:
+            ap_str = "{0:.2f}%".format(average_precision * 100)
+            plt.title('Precision x Recall curve \nClass: %s, AP: %s' % (str(classId), ap_str))
+            # plt.title('Precision x Recall curve \nClass: %s, AP: %.4f' % (str(classId),
+            #           average_precision))
+        else:
+            plt.title('Precision x Recall curve \nClass: %d' % classId)
+        plt.legend(shadow=True)
+        plt.grid()
+        plt.show()
+
+    def PlotPrecisionRecallCurve2(self,
+                                  classId,
+                                  boundingBoxes,
+                                  IOUThreshold=0.5,
+                                  showAP=False,
+                                  showInterpolatedPrecision=False,
+                                  savePath=None,
+                                  showGraphic=True):
+        results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold)
+        result = None
+        for res in results:
+            if res['class'] == classId:
+                result = res
+                break
+        if result is None:
+            raise IOError('Error: Class %d could not be found.' % classId)
+        precision = result['precision']
+        recall = result['recall']
+        average_precision = result['AP']
+        mpre = result['interpolated precision']
+        mrec = result['interpolated recall']
         npos = result['total positives']
         total_tp = result['total TP']
         total_fp = result['total FP']
-
         if showInterpolatedPrecision:
             plt.plot(mrec, mpre, '--r', label='Interpolated precision')
         plt.plot(recall, precision, label='Precision')
```
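The de-duplication loop in this hunk keeps, for each distinct recall value, only the highest interpolated precision, so the 11 points plot cleanly. A standalone sketch of that core step on toy arrays (illustrative values, not the method itself):

```python
import numpy as np

mrec = np.array([0.0, 0.1, 0.1, 0.2, 0.2, 0.2])   # toy recall values
mpre = np.array([1.0, 0.9, 0.8, 0.7, 0.75, 0.6])  # toy precision values

nrec, nprec = [], []
for r in mrec:
    if r not in nrec:
        nrec.append(r)
        # highest precision among all entries sharing this recall value
        nprec.append(mpre[mrec == r].max())

print(nrec)   # [0.0, 0.1, 0.2]
print(nprec)  # [1.0, 0.9, 0.75]
```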
```diff
@@ -288,6 +360,56 @@ def CalculateAveragePrecision(rec, prec):
         # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]
         return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]
 
+    @staticmethod
+    # 11-point interpolated average precision
+    def ElevenPointInterpolatedAP(rec, prec):
+        # def CalculateAveragePrecision2(rec, prec):
+        mrec = []
+        # mrec.append(0)
+        [mrec.append(e) for e in rec]
+        # mrec.append(1)
+        mpre = []
+        # mpre.append(0)
+        [mpre.append(e) for e in prec]
+        # mpre.append(0)
+        recallValues = np.linspace(0, 1, 11)
+        recallValues = list(recallValues[::-1])
+        rhoInterp = []
+        recallValid = []
+        # For each recall value in (0, 0.1, 0.2, ..., 1)
+        for r in recallValues:
+            # Obtain all recall values higher or equal than r
+            argGreaterRecalls = np.argwhere(np.array(mrec[:-1]) >= r)
+            pmax = 0
+            # If there are recalls above r
+            if argGreaterRecalls.size != 0:
+                pmax = max(mpre[argGreaterRecalls.min():])
+            recallValid.append(r)
+            rhoInterp.append(pmax)
+        # By definition AP = sum(max(precision whose recall is above r)) / 11
+        ap = sum(rhoInterp) / 11
+        # Generating values for the plot
+        rvals = []
+        rvals.append(recallValid[0])
+        [rvals.append(e) for e in recallValid]
+        rvals.append(0)
+        pvals = []
+        pvals.append(0)
+        [pvals.append(e) for e in rhoInterp]
+        pvals.append(0)
+        # rhoInterp = rhoInterp[::-1]
+        cc = []
+        for i in range(len(rvals)):
+            p = (rvals[i], pvals[i - 1])
+            if p not in cc:
+                cc.append(p)
+            p = (rvals[i], pvals[i])
+            if p not in cc:
+                cc.append(p)
+        recallValues = [i[0] for i in cc]
+        rhoInterp = [i[1] for i in cc]
+        return [ap, rhoInterp, recallValues, None]
+
     # For each detection, calculate IOU with reference
     @staticmethod
     def _getAllIOUs(reference, detections):
```
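A worked toy example of the 11-point rule implemented above: for each recall level r in {0, 0.1, ..., 1.0}, take the maximum precision whose recall is >= r, then average the 11 values. This is a simplified restatement with made-up numbers, not a byte-for-byte reimplementation of ElevenPointInterpolatedAP (which also drops the last recall entry and builds plot points):

```python
import numpy as np

rec = np.array([0.2, 0.4, 0.4, 0.8])     # toy recall values
prec = np.array([1.0, 0.5, 0.67, 0.57])  # matching toy precision values

ap = 0.0
for r in np.linspace(0, 1, 11):
    greater = prec[rec >= r]              # precisions with recall >= r
    ap += greater.max() if greater.size else 0.0
ap /= 11
print(ap)  # (3 * 1.0 + 2 * 0.67 + 4 * 0.57 + 2 * 0.0) / 11 ≈ 0.6018
```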

lib/utils.py

Lines changed: 14 additions & 1 deletion
```diff
@@ -1,6 +1,19 @@
-import cv2
 from enum import Enum
 
+import cv2
+
+
+class MethodAveragePrecision(Enum):
+    """
+    Class representing the method used to compute the average precision:
+    every-point interpolation or 11-point interpolation.
+
+    Developed by: Rafael Padilla
+    Last modification: Apr 28 2018
+    """
+    EveryPointInterpolation = 1
+    ElevenPointInterpolation = 2
+
 
 class CoordinatesType(Enum):
     """
```
(binary image, 28.3 KB)
samples/sample_2/sample_2.py

Lines changed: 6 additions & 2 deletions
```diff
@@ -14,6 +14,7 @@
 from BoundingBox import BoundingBox
 from BoundingBoxes import BoundingBoxes
 from Evaluator import *
+from utils import *
 
 
 def getBoundingBoxes():
@@ -138,12 +139,14 @@ def createImages(dictGroundTruth, dictDetected):
     'object',  # Class to show
     boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
     IOUThreshold=0.3,  # IOU threshold
+    method=MethodAveragePrecision.EveryPointInterpolation,  # As the official matlab code
     showAP=True,  # Show Average Precision in the title of the plot
-    showInterpolatedPrecision=False)  # Don't plot the interpolated precision curve
+    showInterpolatedPrecision=True)  # Plot the interpolated precision curve
 # Get metrics with PASCAL VOC metrics
 metricsPerClass = evaluator.GetPascalVOCMetrics(
     boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
-    IOUThreshold=0.3)  # IOU threshold
+    IOUThreshold=0.3,  # IOU threshold
+    method=MethodAveragePrecision.EveryPointInterpolation)  # As the official matlab code
 print("Average precision values per class:\n")
 # Loop through classes to obtain their metrics
 for mc in metricsPerClass:
@@ -156,3 +159,4 @@ def createImages(dictGroundTruth, dictDetected):
     irec = mc['interpolated recall']
     # Print AP per class
     print('%s: %f' % (c, average_precision))
+a = 123
```
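Building on this sample, one way to compare the two definitions side by side, as a hedged sketch under the same assumptions as sample_2.py (an `evaluator` instance and a populated `boundingboxes` object already exist):

```python
# Compare every-point vs. 11-point AP on the same detections.
# Assumes `evaluator` and `boundingboxes` from the sample above.
for method in (MethodAveragePrecision.EveryPointInterpolation,
               MethodAveragePrecision.ElevenPointInterpolation):
    results = evaluator.GetPascalVOCMetrics(
        boundingboxes, IOUThreshold=0.3, method=method)
    for mc in results:
        print('%s / %s: AP = %f' % (method.name, mc['class'], mc['AP']))
```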
