@@ -81,7 +81,7 @@ def _calc_stats(self):
         fp = 10
         fn = 30
 
-        specificity = 470 / 500
+        sensitivity = 470 / 500
         positive_predictivity = 470 / 480
         false_positive_rate = 10 / 480
 
@@ -111,7 +111,7 @@ def _calc_stats(self):
         self.fn = self.n_ref - self.tp
         # No tn attribute
 
-        self.specificity = float(self.tp) / self.n_ref
+        self.sensitivity = float(self.tp) / self.n_ref
         self.positive_predictivity = float(self.tp) / self.n_test
         self.false_positive_rate = float(self.fp) / self.n_test
 
@@ -243,7 +243,7 @@ def print_summary(self):
         self.fn = self.n_ref - self.tp
         # No tn attribute
 
-        self.specificity = self.tp / self.n_ref
+        self.sensitivity = self.tp / self.n_ref
         self.positive_predictivity = self.tp / self.n_test
         self.false_positive_rate = self.fp / self.n_test
 
@@ -253,8 +253,8 @@ def print_summary(self):
         print('False Positives (unmatched test samples: %d' % self.fp)
         print('False Negatives (unmatched reference samples): %d\n' % self.fn)
 
-        print('Specificity: %.4f (%d/%d)'
-              % (self.specificity, self.tp, self.n_ref))
+        print('Sensitivity: %.4f (%d/%d)'
+              % (self.sensitivity, self.tp, self.n_ref))
         print('Positive Predictivity: %.4f (%d/%d)'
               % (self.positive_predictivity, self.tp, self.n_test))
         print('False Positive Rate: %.4f (%d/%d)'
@@ -400,8 +400,8 @@ def benchmark_mitdb(detector, verbose=False, print_results=False):
     comparitors : dictionary
         Dictionary of Comparitor objects run on the records, keyed on
        the record names.
-    specificity : float
-        Aggregate specificity.
+    sensitivity : float
+        Aggregate sensitivity.
     positive_predictivity : float
         Aggregate positive_predictivity.
     false_positive_rate : float
@@ -433,7 +433,7 @@ def benchmark_mitdb(detector, verbose=False, print_results=False):
         comparitors = p.starmap(benchmark_mitdb_record, args)
 
     # Calculate aggregate stats
-    specificity = np.mean([c.specificity for c in comparitors])
+    sensitivity = np.mean([c.sensitivity for c in comparitors])
     positive_predictivity = np.mean(
         [c.positive_predictivity for c in comparitors])
     false_positive_rate = np.mean(
@@ -444,14 +444,14 @@ def benchmark_mitdb(detector, verbose=False, print_results=False):
         print('Benchmark complete')
 
     if print_results:
-        print('\nOverall MITDB Performance - Specificity: %.4f, Positive Predictivity: %.4f, False Positive Rate: %.4f\n'
-              % (specificity, positive_predictivity, false_positive_rate))
+        print('\nOverall MITDB Performance - Sensitivity: %.4f, Positive Predictivity: %.4f, False Positive Rate: %.4f\n'
+              % (sensitivity, positive_predictivity, false_positive_rate))
         for record_name in record_list:
             print('Record %s:' % record_name)
             comparitors[record_name].print_summary()
             print('\n\n')
 
-    return comparitors, specificity, positive_predictivity, false_positive_rate
+    return comparitors, sensitivity, positive_predictivity, false_positive_rate
 
 
 def benchmark_mitdb_record(rec, detector, verbose):
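
The rename matters beyond naming consistency: the value computed as tp / n_ref is sensitivity (recall), not specificity, which would require a true-negative count that beat-by-beat comparison does not produce (hence the "# No tn attribute" comment). Below is a minimal standalone sketch of the distinction, using the example numbers from the _calc_stats docstring; it is illustrative only and not part of the wfdb API.

    # Illustrative sketch (not wfdb code): why tp / n_ref is
    # sensitivity rather than specificity.
    tp = 470             # matched beats
    fp = 10              # unmatched test samples   (n_test - tp)
    fn = 30              # unmatched reference samples (n_ref - tp)
    n_ref = tp + fn      # 500 reference annotations
    n_test = tp + fp     # 480 test annotations

    # Sensitivity (recall): fraction of reference beats that were detected.
    sensitivity = tp / n_ref              # 470 / 500 = 0.94

    # Specificity would be tn / (tn + fp), but beat comparison yields no
    # meaningful true-negative count, so it cannot be computed here.

    positive_predictivity = tp / n_test   # 470 / 480
    false_positive_rate = fp / n_test     # 10 / 480

    print('Sensitivity: %.4f (%d/%d)' % (sensitivity, tp, n_ref))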