@@ -390,7 +390,6 @@ class NativeImageHexToInt(object):
            def __call__(self, *args, **kwargs):
                return int(args[0], 16)

-       suiteName = self.bmSuite.name() if self.bmSuite else ""
        return [
            mx_benchmark.StdOutRule(
                r"The executed image size for benchmark (?P<bench_suite>[a-zA-Z0-9_\-]+):(?P<benchmark>[a-zA-Z0-9_\-]+) is (?P<value>[0-9]+) B",
@@ -422,7 +421,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": ("<type>", str)
            }),
            mx_benchmark.StdOutRule(r'^\[\S+:[0-9]+\][ ]+\[total\]:[ ]+(?P<time>[0-9,.]+?) ms', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "compile-time",
                "metric.type": "numeric",
@@ -434,7 +432,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": "total",
            }),
            mx_benchmark.StdOutRule(r'^\[\S+:[0-9]+\][ ]+(?P<phase>\w+?):[ ]+(?P<time>[0-9,.]+?) ms', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "compile-time",
                "metric.type": "numeric",
@@ -446,7 +443,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": ("<phase>", str),
            }),
            mx_benchmark.StdOutRule(r'^[ ]*[0-9]+[ ]+.(?P<section>[a-zA-Z0-9._-]+?)[ ]+(?P<size>[0-9a-f]+?)[ ]+', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "binary-section-size",
                "metric.type": "numeric",
@@ -458,7 +454,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": ("<section>", str),
            }),
            mx_benchmark.JsonStdOutFileRule(r'^# Printing analysis results stats to: (?P<path>\S+?)$', 'path', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "analysis-stats",
                "metric.type": "numeric",
@@ -470,7 +465,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": "reachable-types",
            }, ['total_reachable_types']),
            mx_benchmark.JsonStdOutFileRule(r'^# Printing analysis results stats to: (?P<path>\S+?)$', 'path', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "analysis-stats",
                "metric.type": "numeric",
@@ -482,7 +476,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": "reachable-methods",
            }, ['total_reachable_methods']),
            mx_benchmark.JsonStdOutFileRule(r'^# Printing analysis results stats to: (?P<path>\S+?)$', 'path', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "analysis-stats",
                "metric.type": "numeric",
@@ -494,7 +487,6 @@ def __call__(self, *args, **kwargs):
                "metric.object": "reachable-fields",
            }, ['total_reachable_fields']),
            mx_benchmark.JsonStdOutFileRule(r'^# Printing analysis results stats to: (?P<path>\S+?)$', 'path', {
-               "bench-suite": suiteName,
                "benchmark": benchmarks[0],
                "metric.name": "analysis-stats",
                "metric.type": "numeric",