+import contextlib
+import gc
+import time
+from argparse import ArgumentParser, Namespace
 from typing import List, Optional
 
-from src.pipelines import get_pipeline_class
-from src.utils.arguments import parse_args
-from src.utils.benchmark import benchmark_end_to_end
-from src.utils.input import get_dummy_batch
-from src.utils.logging import configure_logging
+import torch
+
+from src.pipeline import get_pipeline_class
+from src.profile import get_profiler, logger
+from src.utils import (
+    configure_logging,
+    format_mib,
+    format_ms,
+    get_dummy_batch,
+    log_dict,
+    log_rank_n,
+    parse_config_args,
+)
+
+
+def get_arg_parser() -> ArgumentParser:
+    parser = ArgumentParser()
+
+    # Model
+    parser.add_argument("--model_type")
+    parser.add_argument("--pretrained_config")
+    parser.add_argument("--pretrained_model")
+    parser.add_argument("--tokenizer", default="gpt2")
+    parser.add_argument("--trust_remote_code", action="store_true")
+    parser.add_argument("config_args", nargs="*")
+
+    # Runtime
+    parser.add_argument("--pipeline_class", default="HF_Pipeline")
+    parser.add_argument("--device", default="cuda", type=torch.device)
+    parser.add_argument("--dtype", default="float16", type=lambda x: getattr(torch, x))
+    parser.add_argument("--local_rank", type=int)
+    parser.add_argument("--no_fast_init", dest="fast_init", action="store_false")
+
+    # Input and output
+    parser.add_argument("--batch_size", default=1, type=int)
+    parser.add_argument("--max_input_length", default=-1, type=int)
+    parser.add_argument("--max_new_tokens", default=100, type=int)
+
+    # Cleanup
+    parser.add_argument("--clear_every_run", action="store_true")
+
+    # Benchmark cycles
+    parser.add_argument("--skip", type=int, default=1)
+    parser.add_argument("--warmup", type=int, default=None)
+    parser.add_argument("--cycles", type=int, default=5)
+
+    # Profiling and logging
+    parser.add_argument("--max_log_outputs", default=None, type=int)
+    parser.add_argument("--profile", action="store_true")
+    parser.add_argument("--full_trace", action="store_true")
+    parser.add_argument("--show_op_names", action="store_true")
+
+    return parser
 
 
 def main(argv: Optional[List[str]] = None) -> None:
-    args = parse_args(argv=argv)
+    parser = get_arg_parser()
+    args = parser.parse_args(argv)
+    config_args = parse_config_args(args.config_args)
+    generate_kwargs = {"max_new_tokens": args.max_new_tokens, "do_sample": False}
+    inputs = get_dummy_batch(args.batch_size, args.max_input_length)
+    warmup = args.profile if args.warmup is None else args.warmup
+    max_log_outputs = args.batch_size if args.max_log_outputs is None else args.max_log_outputs
 
     pipeline_class = get_pipeline_class(args.pipeline_class)
     pipeline = pipeline_class(
         model_type=args.model_type,
         pretrained_model=args.pretrained_model,
         pretrained_config=args.pretrained_config,
-        config_args=args.config_args,
+        config_args=config_args,
         tokenizer=args.tokenizer,
         device=args.device,
         dtype=args.dtype,
         fast_init=args.fast_init,
         trust_remote_code=args.trust_remote_code,
     )
 
-    benchmark_end_to_end(
-        pipeline=pipeline,
-        inputs=get_dummy_batch(args.batch_size, args.max_input_length),
-        generate_kwargs={"max_new_tokens": args.max_new_tokens, "do_sample": False},
-        profile=args.profile,
-        skip=args.skip,
-        warmup=args.profile if args.warmup is None else args.warmup,
-        cycles=args.cycles,
-        full_trace=args.full_trace,
-        show_op_names=args.show_op_names,
-        max_log_outputs=args.batch_size if args.max_log_outputs is None else args.max_log_outputs,
-        clear_every_run=args.clear_every_run,
-    )
+    all_metrics = []
+
+    if args.profile:
+        profiler = get_profiler(
+            skip=args.skip,
+            warmup=warmup,
+            cycles=args.cycles,
+            full_trace=args.full_trace,
+            show_op_names=args.show_op_names,
+        )
+    else:
+        profiler = contextlib.nullcontext()
+
+    benchmark_stats = {
+        "Model parameters": pipeline.get_num_parameters(),
+        "Batch size": len(inputs),
+        **generate_kwargs,
+        **pipeline.get_initialization_metrics(),
+        "Warmup cycles": args.skip + warmup,
+        "Benchmark cycles": args.cycles,
+        "Total cycles": args.skip + warmup + args.cycles,
+    }
+
+    if pipeline.device.type == "cuda":
+        benchmark_stats["Initial memory used"] = format_mib(torch.cuda.memory_allocated())
+        benchmark_stats["Initial memory reserved"] = format_mib(torch.cuda.memory_reserved())
+        torch.cuda.reset_peak_memory_stats()
+
+    t0 = time.perf_counter()
+    with profiler as p:
+        for step in range(args.skip + warmup + args.cycles):
+            if step == args.skip + warmup:
+                t1 = time.perf_counter()
+                benchmark_stats["Warmup time"] = format_ms(t1 - t0)
+            generated_text, metrics = pipeline(inputs, **generate_kwargs)
+            if args.profile:
+                p.step()
+
+            if step == 0:
+                for i, o, _ in zip(inputs, generated_text, range(max_log_outputs)):
+                    log_rank_n(f"{'-' * 60}\nINPUT = {i}\nOUTPUT = {o}", logger.info)
+
+            if step >= args.skip + warmup:
+                all_metrics.append(metrics)
+
+            if args.clear_every_run:
+                torch.cuda.synchronize()
+                gc.collect()
+                torch.cuda.empty_cache()
+    if pipeline.device.type == "cuda":
+        benchmark_stats["Memory used"] = format_mib(torch.cuda.memory_allocated())
+        benchmark_stats["Memory reserved"] = format_mib(torch.cuda.memory_reserved())
+        benchmark_stats["Max memory used"] = format_mib(torch.cuda.max_memory_allocated())
+        benchmark_stats["Max memory reserved"] = format_mib(torch.cuda.max_memory_reserved())
+
+    t2 = time.perf_counter()
+    benchmark_stats["Benchmark time"] = format_ms(t2 - t1)
+    benchmark_stats["Total time"] = format_ms(t2 - t0)
+
+    if len(all_metrics) > 0:
+        benchmark_stats.update(pipeline.aggregate_and_format_metrics(all_metrics))
+
+    log_rank_n("*** Benchmark results:", logger.info)
+    log_dict(benchmark_stats, logger.info)
 
 
 if __name__ == "__main__":
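
The src.profile.get_profiler helper is not part of this diff. A minimal sketch of what a compatible implementation could look like, assuming it wraps torch.profiler.profile and maps skip/warmup/cycles onto torch.profiler.schedule; the full_trace and show_op_names handling below is an assumption, not the repo's code:

import logging

import torch

logger = logging.getLogger(__name__)


def get_profiler(
    skip: int,
    warmup: int,
    cycles: int,
    full_trace: bool = False,
    show_op_names: bool = False,
) -> torch.profiler.profile:
    # Assumed mapping: skip the first `skip` steps outright, treat the next
    # `warmup` steps as profiler warmup, and record the final `cycles` steps.
    schedule = torch.profiler.schedule(skip_first=skip, wait=0, warmup=warmup, active=cycles)

    def on_trace_ready(prof: torch.profiler.profile) -> None:
        # Print an operator-level summary once the active window completes;
        # show_op_names is assumed to lift the row limit on the table.
        row_limit = -1 if show_op_names else 20
        logger.info(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=row_limit))

    return torch.profiler.profile(
        activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
        schedule=schedule,
        on_trace_ready=on_trace_ready,
        with_stack=full_trace,
    )

Under such a schedule, the p.step() call in the benchmark loop advances the profiler once per generation, so exactly `cycles` runs land in the trace. Note also that since args.profile is a boolean, the `warmup = args.profile if args.warmup is None else args.warmup` line in main defaults warmup to one cycle when profiling and zero otherwise.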
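
parse_config_args is likewise defined elsewhere in src.utils. Since config_args is collected as free positional tokens (nargs="*") and forwarded to the pipeline's config, a plausible reading is KEY=VALUE overrides for the pretrained config; a sketch under that assumption:

from typing import Dict, List


def parse_config_args(config_args: List[str]) -> Dict[str, str]:
    # Hypothetical sketch: assumes each token is a KEY=VALUE pair, e.g.
    # `n_layer=24 n_head=16`. The real helper may also coerce value types.
    parsed = {}
    for token in config_args:
        key, sep, value = token.partition("=")
        if not sep:
            raise ValueError(f"expected KEY=VALUE, got {token!r}")
        parsed[key] = value
    return parsed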
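
The two formatters are fed raw units worth spelling out: format_ms receives time.perf_counter() differences, which are in seconds, and format_mib receives byte counts from the torch.cuda.memory_* counters. Plausible (assumed) implementations:

def format_ms(seconds: float) -> str:
    # perf_counter() deltas are seconds; report milliseconds.
    return f"{1000 * seconds:.2f} ms"


def format_mib(num_bytes: int) -> str:
    # CUDA memory counters are bytes; report mebibytes.
    return f"{num_bytes / 2**20:.0f} MiB"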
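
A typical invocation of the new entry point might look like this, assuming the file lives at src/main.py; the model name and the trailing config override are illustrative, not taken from the commit:

python3 -m src.main --pipeline_class=HF_Pipeline --model_type=gpt2 --pretrained_config=gpt2 \
    --dtype=float16 --batch_size=8 --max_new_tokens=100 --skip=1 --warmup=2 --cycles=5 \
    n_positions=1024

With --profile added, the same run also emits the profiler trace described above; --clear_every_run trades throughput for a torch.cuda.empty_cache() between generations so that per-run memory numbers are not polluted by the previous cycle.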