-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy path_Common.py
770 lines (621 loc) · 33 KB
/
_Common.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 tw=100 et ai si
#
# Copyright (C) 2019-2023 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
# Author: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
"""
This module contains miscellaneous functions used by the tools in the 'wult' project. There is no a
single purpose this module serves, it is just a collection of shared code. Many functions in this
module require the 'args' object which represents the command-line arguments.
"""
# pylint: disable=no-member
import sys
import logging
import re
from pathlib import Path
from pepclibs import ASPM
from pepclibs.helperlibs import Trivial, YAML, ProcessManager
from pepclibs.helperlibs.Exceptions import Error, ErrorNotFound, ErrorNotSupported
from statscollectlibs.helperlibs import ReportID
from statscollectlibs.collector import StatsCollectBuilder
from statscollecttools import ToolInfo
from wultlibs import Devices
from wultlibs.deploylibs import _Deploy
from wultlibs.helperlibs import Human
_LOG = logging.getLogger()
# Description for the '--datapoints' option of the 'start' command.
DATAPOINTS_DESCR = """How many datapoints should the test result include, default is 1000000."""
# Duration specifiers description.
DURATION_SPECS_DESCR = "d - days, h - hours, m - minutes, s - seconds"
DURATION_NS_SPECS_DESCR = "ms - milliseconds, us - microseconds, ns - nanoseconds"
# Description for the '--time-limit' option of the 'start' command.
TIME_LIMIT_DESCR = f"""The measurement time limit, i.e., for how long the SUT should be measured.
The default unit is minute, but you can use the following handy specifiers
as well: {DURATION_SPECS_DESCR}."""
# Description for the '--start-over' option of the 'start' command.
START_OVER_DESCR = """If the output directory already contains the datapoints CSV file with some
amount of datapoints in it, the default behavior is to keep them and append
more datapoints if necessary. But with this option all the pre-existing
datapoints will be removed as soon as the tool starts writing new
datapoints."""
# Description for the '--outdir' option of the 'start' command.
START_OUTDIR_DESCR = """Path to the directory to store the results at."""
_REPORTID_CHARS_DESCR = ReportID.get_charset_descr()
START_REPORTID_DESCR = f"""Any string which may serve as an identifier of this run. By default
report ID is the current date, prefixed with the remote host name in case
the '-H' option was used: [hostname-]YYYYMMDD. For example, "20150323" is
a report ID for a run made on March 23, 2015. The allowed characters are:
{_REPORTID_CHARS_DESCR}."""
# Description for the '--report' option of the 'start' command.
START_REPORT_DESCR = """Generate an HTML report for collected results (same as calling 'report'
command with default arguments)."""
# Description for the '--stats' option of the 'start' command.
default_stnames = ", ".join(StatsCollectBuilder.DEFAULT_STNAMES)
STATS_DESCR = f"""Comma-separated list of statistics to collect. By default, only
'{default_stnames}' statistics are collected. Use 'all' to collect all possible
statistics. Use '--stats=""' or '--stats="none"' to disable statistics collection.
If you know exactly what statistics you need, specify the comma-separated list of
statistics to collect."""
# Description for the '--stat-intervals' option of the 'start' command.
STAT_INTERVALS_DESCR = """The intervals for statistics. Statistics collection is based on doing
periodic snapshots of data. For example, by default the 'acpower'
statistics collector reads SUT power consumption for the last second
every second, and 'turbostat' default interval is 5 seconds. Use
'acpower:5,turbostat:10' to increase the intervals to 5 and 10 seconds
correspondingly. Use the '--list-stats' to get the default interval
values."""
# Description for the '--list-stats' option of the 'start' command.
LIST_STATS_DESCR = """Print information about the statistics '%s' can collect and exit."""
# Description for the '--outdir' option of the 'report' command.
def get_report_outdir_descr(toolname):
    """
    Build and return the description for the '--outdir' option of the 'report' command. The
    arguments are as follows.
      * toolname - name of the tool to return the description for.
    """
    return f"""Path to the directory to store the report at. By default the report is stored in the
               '{toolname}-report-<reportid>' sub-directory of the test result directory. If there
               are multiple test results, the report is stored in the current directory. The
               '<reportid>' is report ID of {toolname} test result."""
# Description for the '--force' option of the 'start' command.
START_FORCE_DESCR = """By default a network card is not accepted as a measurement device if it is
used by a Linux network interface and the interface is in an active state,
such as "up". Use '--force' to disable this safety mechanism. Use it with
caution."""
# Description of the '--all' option of the 'scan' command.
def get_scan_all_descr(toolname):
    """
    Build and return the description for the '--all' option of the 'scan' command. The arguments
    are as follows.
      * toolname - name of the tool to return the description for.
    """
    return f"""By default this command prints only the compatible devices which are supported by
               current {toolname} installation. This option makes this command print about all the
               compatible devices."""
# Description for the '--even-up-dp-count' option of the 'report' command.
EVEN_UP_DP_DESCR = """Even up datapoints count before generating the report. This option is useful
when generating a report for many test results (a diff). If the test results
contain different count of datapoints (rows count in the CSV file), the
resulting histograms may look a little bit misleading. This option evens up
datapoints count in the test results. It just finds the test result with the
minimum count of datapoints and ignores the extra datapoints in the other test
results."""
# Description for the '--xaxes' option of the 'report' command.
XAXES_DESCR = """A comma-separated list of metrics (or python style regular expressions matching the
names) to use on X-axes of the scatter plot(s), default is '%s'. Use
'--list-metrics' to get the list of the available metrics. Use value 'none' to
disable scatter plots."""
# Description for the '--yaxes' option of the 'report' command.
YAXES_DESCR = """A comma-separated list of metrics (or python style regular expressions matching the
names) to use on the Y-axes for the scatter plot(s). If multiple metrics are
specified for the X- or Y-axes, then the report will include multiple scatter plots
for all the X- and Y-axes combinations. The default is '%s'. Use '--list-metrics'
to get the list of the available metrics. Use value 'none' to disable scatter
plots."""
# Description for the '--hist' option of the 'report' command.
HIST_DESCR = """A comma-separated list of metrics (or python style regular expressions matching the
names) to add a histogram for, default is '%s'. Use '--list-metrics' to get the list
of the available metrics. Use value 'none' to disable histograms."""
# Description for the '--chist' option of the 'report' command.
CHIST_DESCR = """A comma-separated list of metrics (or python style regular expressions matching the
names) to add a cumulative distribution for, default is '%s'. Use '--list-metrics'
to get the list of the available metrics. Use value 'none' to disable cumulative
histograms."""
# Description for the '--reportids' option of the 'report' command.
REPORTIDS_DESCR = """Every input raw result comes with a report ID. This report ID is basically a
short name for the test result, and it used in the HTML report to refer to the
test result. However, sometimes it is helpful to temporarily override the
report IDs just for the HTML report, and this is what the '--reportids' option
does. Please, specify a comma-separated list of report IDs for every input raw
test result. The first report ID will be used for the first raw rest result,
the second report ID will be used for the second raw test result, and so on.
Please, refer to the '--reportid' option description in the 'start' command for
more information about the report ID."""
# Description for the '--report-descr' option of the 'report' command.
REPORT_DESCR = """The report description - any text describing this report as whole, or path to a
file containing the overall report description. For example, if the report
compares platform A and platform B, the description could be something like
'platform A vs B comparison'. This text will be included into the very beginning
of the resulting HTML report."""
# Description for the '--copy-raw' option of the 'report' command.
COPY_RAW_DESCR = """Copy raw test results to the output directory."""
# Description for the '--list-metrics' option of the 'report' and other commands.
LIST_METRICS_DESCR = "Print the list of the available metrics and exit."
# Description for the 'filter' command.
FILT_DESCR = """Filter datapoints out of a test result by removing CSV rows and metrics according to
specified criteria. The criteria is specified using the row and metric filter and
selector options ('--include', '--exclude-metrics', etc). The options may be
specified multiple times."""
EXCL_DESCR = """Datapoints to exclude: remove all the datapoints satisfying the expression
'EXCLUDE'."""
# Description for the '--include' option of the 'filter' command.
INCL_DESCR = """Datapoints to include: remove all datapoints except for those satisfying the
expression 'INCLUDE'. In other words, this option is the inverse of '--exclude'."""
KEEP_FILTERED_DESCR = """If the '--exclude' / '--include' options are used, then the datapoints not
matching the selector or matching the filter are discarded. This is the
default behavior which can be changed with this option. If
'--keep-filtered' has been specified, then all datapoints are saved in
result."""
# Description for the '--exclude-metrics' option of the 'filter' command.
MEXCLUDE_DESCR = """The metrics to exclude. Expects a comma-separated list of the metrics or python
style regular expressions matching the names. For example, the expression
'SilentTime,WarmupDelay,.*Cyc', would remove metrics 'SilentTime', 'WarmupDelay'
and all metrics with 'Cyc' in their name. Use '--list-metrics' to get the list
of the available metrics."""
# Description for the '--include-metrics' option of the 'filter' command.
MINCLUDE_DESCR = """The metrics to include: remove all metrics except for those specified by this
option. The syntax is the same as for '--exclude-metrics'."""
# Description for the '--human-readable' option of the 'filter' command.
FILTER_HUMAN_DESCR = """By default the result 'filter' command print the result as a CSV file to the
standard output. This option can be used to dump the result in a more
human-readable form."""
# Description for the '--outdir' option of the 'filter' command.
FILTER_OUTDIR_DESCR = """By default the resulting CSV lines are printed to the standard output. But
this option can be used to specify the output directly to store the result
at. This will create a filtered version of the input test result."""
# Description for the '--reportid' option of the 'filter' command.
FILTER_REPORTID_DESCR = """Report ID of the filtered version of the result (can only be used with
'--outdir')."""
# Description for the '--funcs' option of the 'calc' command.
FUNCS_DESCR = """Comma-separated list of summary functions to calculate. By default all generally
interesting functions are calculated (each metric is associated with a list of
functions that make sense for that metric). Use '--list-funcs' to get the list of
supported functions."""
# Description for the '--list-funcs' option of the 'calc' command.
LIST_FUNCS_DESCR = "Print the list of the available summary functions."
def get_pman(args):
    """
    Return the process manager object for the host defined by the command-line arguments. The
    arguments are as follows.
      * args - the command line arguments object.

    The returned object should either be used with the 'with' statement, or closed with the
    'close()' method.
    """
    # For the local host no credentials are necessary.
    username = privkeypath = timeout = None
    if args.hostname != "localhost":
        username = args.username
        privkeypath = args.privkey
        timeout = args.timeout

    return ProcessManager.get_pman(args.hostname, username=username,
                                   privkeypath=privkeypath, timeout=timeout)
def _validate_range(rng, what, single_ok):
    """
    Parse and validate a comma-separated range string. Implement 'parse_ldist()'. The arguments
    are as follows.
      * rng - the range string to parse (one or two comma-separated values).
      * what - a human-readable name of the range, used in error messages.
      * single_ok - if 'True', a single value is acceptable and is expanded into a [value, value]
                    range; otherwise two values are required.

    Return the range as a list of two integers in nanoseconds.
    """
    min_len = 1 if single_ok else 2

    split_rng = Trivial.split_csv_line(rng)
    if len(split_rng) < min_len:
        raise Error(f"bad {what} range '{rng}', it should include {min_len} numbers")
    if len(split_rng) > 2:
        raise Error(f"bad {what} range '{rng}', it should not include more than 2 numbers")

    vals = [None] * len(split_rng)
    for idx, val in enumerate(split_rng):
        # The default input unit is microseconds, the output is normalized to nanoseconds.
        vals[idx] = Human.parse_human(val, unit="us", target_unit="ns", integer=True, name=what)
        if vals[idx] < 0:
            # Note: zero is allowed, only negative values are rejected.
            raise Error(f"bad {what} value '{split_rng[idx]}', should not be negative")

    if len(vals) == 2:
        if vals[1] - vals[0] < 0:
            raise Error(f"bad {what} range '{rng}', first number cannot be greater than the second "
                        f"number")
        if not single_ok and vals[0] == vals[1]:
            raise Error(f"bad {what} range '{rng}', first number cannot be the same as the second "
                        f"number")

    if len(vals) == 1:
        # Expand a single value into a [value, value] range.
        vals.append(vals[0])

    return vals
def parse_ldist(ldist, single_ok=True):
    """
    Parse and validate the launch distance range ('--ldist' option). The arguments are as follows.
      * ldist - a string of single or two comma-separated launch distance values.
      * single_ok - if 'True', a single launch distance value is allowed and is expanded into a
                    range; if 'False', raise an exception when 'ldist' contains only a single
                    number.

    The default 'ldist' unit is 'microseconds', but the 'ldist' values are parsed with
    'Human.parse_human()', so they can include specifiers like 'ms' or 'us'.

    Return launch distance range as a list of two integers in nanoseconds.
    """
    return _validate_range(ldist, "launch distance", single_ok)
def even_up_dpcnt(rsts):
    """
    Implement the '--even-up-datapoints' option. The arguments are as follows.
      * rsts - a list of 'RORawResult' objects to even up datapoints count in.

    Truncate datapoints count in 'rsts' to the size of the smallest test result, where "size" is
    defined as the count of rows in the CSV file.
    """

    # Nothing to even up for an empty collection of results.
    if not rsts:
        return

    # Find test with the smallest CSV file. It should be a good approximation for the smallest
    # test result, and it will be corrected as we go.
    min_size = min_res = None
    for res in rsts:
        try:
            size = res.dp_path.stat().st_size
        except OSError as err:
            msg = Error(err).indent(2)
            raise Error(f"'stat()' failed for '{res.dp_path}':\n{msg}") from None
        if min_size is None or size < min_size:
            min_size = size
            min_res = res

    min_res.load_df()
    min_dpcnt = len(min_res.df.index)

    # Load only 'min_dpcnt' datapoints for every test result, correcting 'min_dpcnt' as we go.
    for res in rsts:
        res.load_df(nrows=min_dpcnt)
        min_dpcnt = min(min_dpcnt, len(res.df.index))

    # And in case our initial 'min_dpcnt' estimation was incorrect, truncate all the results to
    # the final 'min_dpcnt'.
    for res in rsts:
        dpcnt = len(res.df.index)
        if dpcnt > min_dpcnt:
            res.df = res.df.truncate(after=min_dpcnt - 1)
def set_filters(args, res):
    """
    Implement the following command-line options: '--include', '--exclude', '--include-metrics',
    '--exclude-metrics'. The arguments are as follows.
      * args - the command line arguments object.
      * res - a 'RORawResult' or 'WORawResult' object to set filters for.
    """

    def _apply_ops(result, ops):
        """Apply the filter operations in 'ops' to test result 'result'."""
        result.clear_filts()
        for opname, expr in ops.items():
            # The '--include-metrics' and '--exclude-metrics' options may carry a comma-separated
            # list of metrics.
            if opname in ("minclude", "mexclude"):
                expr = Trivial.split_csv_line(expr)
            getattr(result, f"set_{opname}")(expr)

    ops = getattr(args, "oargs", None)
    if not ops:
        return

    _apply_ops(res, ops)
    setattr(res, "keep_filtered", getattr(args, "keep_filtered", None))
def apply_filters(args, res):
    """
    Set and apply filters. The arguments are as follows.
      * args - the command line arguments object.
      * res - a 'RORawResult' or 'WORawResult' object to set and apply filters for.
    """
    set_filters(args, res)
    # Re-load the dataframe - presumably this is what applies the filters set above; verify in
    # 'RORawResult.load_df()'.
    res.load_df()
def scan_command(args):
    """
    Implement the 'scan' command for the 'wult' and 'ndl' tools. The arguments are as follows.
      * args - the command line arguments object.

    Scan for compatible measurement devices and print two groups: devices supported by the current
    deployment, and (with '--all') compatible but unsupported devices.
    """
    pman = get_pman(args)
    found_something = False
    supported_msgs = unsupported_msgs = ""
    for dev in Devices.scan_devices(args.toolname, pman):
        err_msg = None
        found_something = True
        # Check deployment only for the installables this device actually needs.
        deploy_info = reduce_installables(args.deploy_info, dev)
        with _Deploy.DeployCheck("wult", args.toolname, deploy_info, pman=pman) as depl:
            try:
                depl.check_deployment()
            except (ErrorNotFound, ErrorNotSupported) as err:
                # Without '--all', silently skip devices unsupported by the current deployment.
                if not getattr(args, "all", False):
                    _LOG.debug(err)
                    continue
                err_msg = str(err)
        msg = f"* Device ID: {dev.info['devid']}\n"
        if dev.info.get("alias"):
            msg += f" - Alias: {dev.info['alias']}\n"
        if err_msg:
            # The error message may include newlines, align them to match our indentation.
            err_msg = err_msg.replace("\n", "\n ")
            msg += f" - Error: {err_msg}\n"
        msg += f" - Resolution: {dev.info['resolution']} ns\n"
        msg += f" - Description: {dev.info['descr']}\n"
        if err_msg:
            unsupported_msgs += msg
        else:
            supported_msgs += msg
    if not supported_msgs and not unsupported_msgs:
        if not found_something:
            _LOG.info("No %s compatible devices found", args.toolname)
        else:
            _LOG.info("There are compatible devices, but they are not supported by the current %s "
                      "installation", args.toolname)
        return
    if supported_msgs:
        _LOG.info("Compatible and supported device(s)%s:", pman.hostmsg)
        _LOG.info("%s", supported_msgs.strip())
    if unsupported_msgs:
        if supported_msgs:
            # Blank line separating the two groups.
            _LOG.info("")
        _LOG.info("Compatible, but unsupported device(s)%s:", pman.hostmsg)
        _LOG.info("%s", unsupported_msgs.strip())
def filter_command(args):
    """
    Implement the 'filter' command for the 'wult' and 'ndl' tools. The arguments are as follows.
      * args - the command line arguments object.
    """
    from wultlibs.rawresultlibs import RORawResult # pylint: disable=import-outside-toplevel

    res = RORawResult.RORawResult(args.respath)

    if args.list_metrics:
        list_result_metrics([res])
        return

    # Validate the option combination before doing any real work.
    if not getattr(args, "oargs", None):
        raise Error("please, specify at least one reduction criterion")
    if args.reportid and not args.outdir:
        raise Error("'--reportid' can be used only with '-o'/'--outdir'")
    if args.human_readable and args.outdir:
        raise Error("'--human-readable' and '--outdir' are mutually exclusive")

    apply_filters(args, res)

    if args.outdir:
        # Save the filtered version of the result as a new test result.
        res.save(args.outdir, reportid=args.reportid)
        return

    if args.human_readable:
        # Dump each datapoint as a human-readable dictionary, separated by blank lines.
        for dpnum, (_, dp) in enumerate(res.df.iterrows()):
            if dpnum:
                _LOG.info("")
            _LOG.info(Human.dict2str(dict(dp)))
    else:
        res.df.to_csv(sys.stdout, index=False, header=True)
def calc_command(args):
    """
    Implement the 'calc' command for the 'wult' and 'ndl' tools. The arguments are as follows.
      * args - the command line arguments object.

    Calculate and print the summary functions (e.g. min/max/average) for the test result metrics.
    """
    if args.list_funcs:
        from statscollectlibs import DFSummary # pylint: disable=import-outside-toplevel
        for name, descr in DFSummary.get_smry_funcs():
            _LOG.info("%s: %s", name, descr)
        return
    from wultlibs.rawresultlibs import RORawResult # pylint: disable=import-outside-toplevel
    if args.funcs:
        funcnames = Trivial.split_csv_line(args.funcs)
    else:
        # 'None' means "use the default functions for every metric".
        funcnames = None
    res = RORawResult.RORawResult(args.respath)
    if args.list_metrics:
        list_result_metrics([res])
        return
    apply_filters(args, res)
    non_numeric = res.get_non_numeric_metrics()
    if non_numeric:
        minclude = mexclude = []
        if args.minclude:
            minclude = set(Trivial.split_csv_line(args.minclude))
        if args.mexclude:
            mexclude = set(Trivial.split_csv_line(args.mexclude))
        # Warn only about non-numeric metrics the user explicitly mentioned in the
        # '--include-metrics'/'--exclude-metrics' options.
        skip = []
        for metric in non_numeric:
            if metric in minclude or metric in mexclude:
                skip.append(metric)
        if skip:
            _LOG.warning("skipping non-numeric metric(s): %s", ", ".join(skip))
    res.calc_smrys(funcnames=funcnames)
    _LOG.info("Datapoints count: %d", len(res.df))
    YAML.dump(res.smrys, sys.stdout, float_format="%.2f")
def open_raw_results(respaths, toolname, reportids=None):
    """
    Open the input raw test results and return the list of 'RORawResult' objects. The arguments are
    as follows.
      * respaths - list of paths to raw results.
      * toolname - name of the tool opening raw results.
      * reportids - list of reportids to override report IDs in raw results.

    Raise 'Error' if more report IDs than results were provided, or if a result was collected by a
    different tool.
    """
    from wultlibs.rawresultlibs import RORawResult # pylint: disable=import-outside-toplevel

    if reportids:
        reportids = Trivial.split_csv_line(reportids)
    else:
        reportids = []

    if len(reportids) > len(respaths):
        raise Error(f"there are {len(reportids)} report IDs to assign to {len(respaths)} input "
                    f"test results. Please, provide {len(respaths)} or fewer report IDs.")

    # Append the required amount of 'None's to make the 'reportids' list be of the same length as
    # the 'respaths' list.
    reportids += [None] * (len(respaths) - len(reportids))

    rsts = []
    for respath, reportid in zip(respaths, reportids):
        if reportid:
            ReportID.validate_reportid(reportid)

        res = RORawResult.RORawResult(respath, reportid=reportid)
        if toolname != res.info["toolname"]:
            # Note: a space was missing between "the" and the tool name in the original message.
            raise Error(f"cannot generate '{toolname}' report, results are collected with the "
                        f"'{res.info['toolname']}':\n{respath}")
        rsts.append(res)

    return rsts
def list_result_metrics(rsts):
    """
    Implement the '--list-metrics' option by printing the metrics for each raw result in 'rsts'.
    The arguments are as follows.
      * rsts - an iterable collection of test results to print the metrics for.
    """
    for result in rsts:
        _LOG.info("Metrics in '%s':", result.dirpath)
        mdd = result.mdo.mdd
        for mname in result.metrics:
            # Print only metrics that have a metrics definition dictionary entry.
            if mname not in mdd:
                continue
            _LOG.info(" * %s: %s", mname, mdd[mname]["title"])
def reduce_installables(deploy_info, dev):
    """
    Reduce full deployment information 'deploy_info' so that it includes only the installables
    required for using device 'dev'. The arguments are as follows.
      * deploy_info - full deployment information dictionary. Check the 'DeployBase.__init__()'
                      docstring for the format of the dictionary.
      * dev - the device object created by 'Devices.GetDevice()'.

    Return the reduced version of 'deploy_info'.
    """
    # Shallow-copy two levels of the original dictionary so the input is not modified.
    result = {key: value.copy() for key, value in deploy_info.items()}

    for installable, info in deploy_info["installables"].items():
        category = info["category"]
        # Drop drivers for driver-less devices, and helpers for helper-less devices.
        drop = (category == "drivers" and not dev.drvname) or \
               (category in ("shelpers", "bpfhelpers") and not dev.helpername)
        if drop:
            del result["installables"][installable]

    return result
def start_command_reportid(args, pman):
    """
    Validate and return the user-provided report ID. If no report ID was provided, generate and
    return the default report ID. The arguments are as follows.
      * args - the command line arguments object.
      * pman - the process manager object that defines the host to measure.
    """
    if not args.reportid and pman.is_remote:
        # Prefix the auto-generated report ID with the remote host name.
        prefix = pman.hostname
    else:
        prefix = None
    return ReportID.format_reportid(prefix=prefix, reportid=args.reportid,
                                    strftime=f"{args.toolname}-{args.devid}-%Y%m%d")
def start_command_check_network(args, pman, netif):
    """
    In case the device that is used for measurement is a network card, check that it is not in the
    'up' state. This makes sure users do not lose networking by specifying a wrong device by a
    mistake. The arguments are as follows.
      * args - the command line arguments object.
      * pman - the process manager object that defines the host to measure.
      * netif - the network interface object ('NetIface.NetIface()') that will be used for
                measuring the host.
    """
    if args.force:
        # The user explicitly disabled this safety check with '--force'.
        return

    # Refuse an interface in the "up" state - it may be in active use for networking.
    if netif.get_operstate() != "up":
        return

    msg = f" (network interface '{netif.ifname}')" if args.devid != netif.ifname else ""
    raise Error(f"refusing to use device '{args.devid}'{msg}{pman.hostmsg}: it is up and "
                f"might be used for networking. Please, bring it down if you want to use "
                "it for measurements.")
def start_command_list_stats():
    """Implement the '--list-stats' command line option: print the available statistics and their
    default intervals."""
    # Import lazily so the dependency is only paid when the option is actually used.
    from statscollectlibs.collector import StatsCollect # pylint: disable=import-outside-toplevel
    StatsCollect.list_stats()
def report_command_outdir(args, rsts):
    """
    Return the default or user-provided output directory path for the 'report' command. The
    arguments are as follows.
      * args - the command line arguments object.
      * rsts - a list of 'RORawResult' objects to return the output directory for.
    """
    if args.outdir is not None:
        return args.outdir

    if len(args.respaths) > 1:
        # Multiple test results: the report goes to the current directory.
        outdir = ReportID.format_reportid(prefix=f"{args.toolname}-report",
                                          reportid=rsts[0].reportid)
    else:
        # Single test result: don't create the report in the results directory itself, use the
        # 'html-report' sub-directory instead.
        outdir = args.respaths[0].joinpath("html-report")

    _LOG.info("Report output directory: %s", outdir)
    return Path(outdir)
def run_stats_collect_deploy(args, pman):
    """
    Run the 'stats-collect deploy' command. The arguments are as follows.
      * args - the command line arguments object.
      * pman - the process manager object that defines the host to run on.

    A deployment failure is not fatal - it is logged as a warning and the function returns
    normally.
    """
    # pylint: disable=import-outside-toplevel
    from pepclibs.helperlibs import ProjectFiles, LocalProcessManager

    exe_path = ProjectFiles.find_project_helper("stats-collect", ToolInfo.TOOLNAME)

    # Build the 'stats-collect deploy' command line, propagating the relevant options.
    cmd = str(exe_path)
    if _LOG.colored:
        cmd += " --force-color deploy"
    else:
        cmd += " deploy"
    if args.debug:
        cmd += " -d"
    if args.quiet:
        cmd += " -q"
    if args.hostname != "localhost":
        cmd += f" -H {args.hostname}"
    if args.username:
        cmd += f" -U {args.username}"
    if args.privkey:
        cmd += f" -K {args.privkey}"
    if args.timeout:
        cmd += f" -T {args.timeout}"

    _LOG.info("Deploying statistics collectors%s", pman.hostmsg)
    with LocalProcessManager.LocalProcessManager() as lpman:
        try:
            if args.debug:
                # In debug mode, mirror the sub-command output to our stdout/stderr.
                kwargs = {"output_fobjs" : (sys.stdout, sys.stderr)}
            else:
                kwargs = {}
            lpman.run_verify(cmd, **kwargs)
        except Error as err:
            # Fixed a typo in the original warning message ("falied" -> "failed").
            _LOG.warning("failed to deploy statistics collectors%s", pman.hostmsg)
            _LOG.debug(str(err))
def add_freq_noise_cmdline_args(subparser, man_msg):
    """
    Add the 'freq-noise' options to the 'argparse' data. The input arguments are as follows.
      * subparser - the 'argparse' subparser to add the 'freq-noise' options to.
      * man_msg - the message to append to the help text pointing to the relevant man page.
    """
    # The '--freq-noise' option may be given multiple times, hence 'action="append"'.
    helptxt = f"""Add frequency scaling noise to the measured system. 'FREQ_NOISE' is specified as
                  'TYPE:ID:MIN:MAX', where: TYPE should be 'cpu' or 'uncore', specifies whether CPU
                  or uncore frequency should be modified; ID is either CPU number or uncore domain
                  ID to modify the frequency for (e.g. 'cpu:12:...' would target CPU12); MIN is the
                  minimum CPU/uncore frequency value; MAX is the maximum CPU/uncore frequency value.
                  {man_msg}"""
    subparser.add_argument("--freq-noise", action="append", help=helptxt)

    helptxt = f"""Sleep between frequency noise operations. This time is added between every
                  frequency scaling operation executed by the 'freq-noise' feature. Default sleep
                  time is 50ms.
                  {man_msg}"""
    subparser.add_argument("--freq-noise-sleep", help=helptxt)
def parse_freq_noise_cmdline_args(args):
    """
    Parse the frequency noise related command line arguments, and return parsed data as a
    dictionary to be passed to the '_FreqNoise' module. The arguments are as follows.
      * args - the command line arguments object.

    Return a list of frequency noise operation dictionaries, or 'None' if the '--freq-noise'
    option was not used. Raise 'Error' for a malformed specification.
    """
    if not args.freq_noise:
        return None

    specs = []
    for spec in args.freq_noise:
        tokens = spec.split(":")
        # Validate the token count up front, otherwise a malformed specification would cause an
        # 'IndexError' below instead of a helpful error message.
        if len(tokens) != 4:
            raise Error(f"bad freq-noise specification '{spec}', should be in 'TYPE:ID:MIN:MAX' "
                        "format")
        if tokens[0] not in ("uncore", "cpu"):
            # Note: added the missing space before "are supported" in the message.
            raise Error(f"bad domain type for freq-noise: '{tokens[0]}'. Only 'cpu' and 'uncore' "
                        "are supported.")
        if not re.match(r"^\d+$", tokens[1]):
            raise Error(f"bad domain ID for freq-noise: '{tokens[1]}'. Must provide a positive "
                        "integer value.")
        for idx in (2, 3):
            # "min"/"max" are kept as-is, anything else must be a frequency value.
            if tokens[idx] not in ("min", "max"):
                try:
                    tokens[idx] = Human.parse_human(tokens[idx], unit="Hz", integer=True)
                except Error as err:
                    raise Error(f"failed to parse freq-noise frequency '{tokens[idx]}'") from err
        specs += [{"type": tokens[0], "id": tokens[1], "min": tokens[2], "max": tokens[3]}]

    if args.freq_noise_sleep:
        val = Human.parse_human(args.freq_noise_sleep, unit="s", target_unit="us", integer=True,
                                name="frequency noise sleep time")
        specs += [{"type": "sleep", "val": val}]

    return specs
def check_aspm_setting(pman, dev, devname):
    """
    If PCI ASPM is enabled for a device, print a notice message. The arguments are as follows.
      * pman - the process manager object for the target system.
      * dev - the delayed event device object created by 'Devices.GetDevice()'.
      * devname - the device name to use in the message.
    """
    # ASPM is a PCI concept, skip non-PCI devices.
    if not dev.is_pci:
        return
    with ASPM.ASPM(pman=pman) as aspm:
        if aspm.is_l1_enabled(dev.info["devid"]):
            _LOG.notice("PCI L1 ASPM is enabled for %s, and this typically increases the measured "
                        "latency", devname)