# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""SuperBench Runner."""
import os
import sys
import json
import random
import signal
from pathlib import Path
from pprint import pformat
from collections import defaultdict
import jsonlines
from natsort import natsorted
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf
from superbench.common.utils import SuperBenchLogger, logger, gen_ibstat, gen_traffic_pattern_host_groups
from superbench.common.utils.lazy_import import LazyImport
from superbench.benchmarks import ReduceType, Reducer
from superbench.monitor import MonitorRecord
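# AnsibleClient is imported lazily so the ansible dependency is only loaded when a runner is actually constructed.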
AnsibleClient = LazyImport('superbench.runner.ansible', 'AnsibleClient')
class SuperBenchRunner():
"""SuperBench runner class."""
def __init__(self, sb_config, docker_config, ansible_config, sb_output_dir):
"""Initilize.
Args:
sb_config (DictConfig): SuperBench config object.
docker_config (DictConfig): Docker config object.
ansible_config (DictConfig): Ansible config object.
sb_output_dir (str): SuperBench output directory.
"""
self._sb_config = sb_config
self._docker_config = docker_config
self._ansible_config = ansible_config
self._sb_output_dir = sb_output_dir
self._output_path = Path(sb_output_dir).expanduser().resolve()
self._ansible_client = AnsibleClient(ansible_config)
self.__set_logger('sb-run.log')
logger.info('Runner uses config: %s.', pformat(OmegaConf.to_container(self._sb_config, resolve=True)))
logger.info('Runner writes to: %s.', str(self._output_path))
self._sb_benchmarks = self._sb_config.superbench.benchmarks
self.__validate_sb_config()
self._sb_enabled_benchmarks = self.__get_enabled_benchmarks()
logger.info('Runner will run: %s', self._sb_enabled_benchmarks)
def __set_logger(self, filename):
"""Set logger and add file handler.
Args:
filename (str): Log file name.
"""
SuperBenchLogger.add_handler(logger.logger, filename=str(self._output_path / filename))
def __validate_sb_config(self): # noqa: C901
"""Validate SuperBench config object.
Raise:
InvalidConfigError: If input config is invalid.
"""
# TODO: add validation and defaulting
if 'env' not in self._sb_config.superbench:
self._sb_config.superbench.env = {}
for name in self._sb_benchmarks:
if 'modes' not in self._sb_benchmarks[name]:
self._sb_benchmarks[name].modes = []
for idx, mode in enumerate(self._sb_benchmarks[name].modes):
if 'env' not in mode:
self._sb_benchmarks[name].modes[idx].env = {}
if mode.name == 'local':
if 'proc_num' not in mode:
self._sb_benchmarks[name].modes[idx].proc_num = 1
if 'prefix' not in mode:
self._sb_benchmarks[name].modes[idx].prefix = ''
elif mode.name == 'torch.distributed':
if 'proc_num' not in mode:
self._sb_benchmarks[name].modes[idx].proc_num = 8
elif mode.name == 'mpi':
if 'mca' not in mode:
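# Default Open MPI MCA parameters: use the ob1 PML, disable the openib BTL, exclude loopback/docker interfaces from TCP, and turn off HCOLL collectives.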
self._sb_benchmarks[name].modes[idx].mca = {
'pml': 'ob1',
'btl': '^openib',
'btl_tcp_if_exclude': 'lo,docker0',
'coll_hcoll_enable': 0,
}
if 'bind_to' not in mode:
self._sb_benchmarks[name].modes[idx].bind_to = 'numa'
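# Ensure the common path variables always exist in the mode env; a None value means 'inherit from the node environment' (mpirun passes it as a plain '-x KEY').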
for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']:
self._sb_benchmarks[name].modes[idx].env.setdefault(key, None)
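# Topo-aware patterns need per-node InfiniBand state; generate it through Ansible when it is not supplied in the config.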
if 'pattern' in mode:
if mode.pattern.type == 'topo-aware' and 'ibstat' not in mode.pattern:
self._sb_benchmarks[name].modes[idx].pattern.ibstat = gen_ibstat(
self._ansible_config, str(self._output_path / 'ibstate_file.txt')
)
def __get_enabled_benchmarks(self):
"""Get enabled benchmarks list.
Return:
list: List of benchmarks which will be executed.
"""
if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable:
if isinstance(self._sb_config.superbench.enable, str):
return [self._sb_config.superbench.enable]
elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
return list(self._sb_config.superbench.enable)
return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable]
def __get_mode_command(self, benchmark_name, mode, timeout=None):
"""Get runner command for given mode.
Args:
benchmark_name (str): Benchmark name.
mode (DictConfig): Runner mode.
timeout (int): The timeout value in seconds.
Return:
str: Runner command.
"""
exec_command = ('sb exec --output-dir {output_dir} -c sb.config.yaml -C superbench.enable={name}').format(
name=benchmark_name,
output_dir=self._sb_output_dir,
)
if timeout is not None:
exec_command = 'timeout {timeout} {command}'.format(timeout=timeout, command=exec_command)
# Enable nsys profiling based on environment variable
enable_nsys = os.environ.get('SB_ENABLE_NSYS', '') == '1'
trace_dir = os.environ.get('SB_NSYS_TRACE_DIR', self._sb_output_dir)
mode_command = exec_command
if mode.name == 'local':
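# In local mode, only the rank-0 process is wrapped with nsys when SB_ENABLE_NSYS=1.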
trace_command = (
f'nsys profile --output {trace_dir}/{benchmark_name}_{mode.proc_rank}_traces '
f'--backtrace none --sample none --force-overwrite true --cpuctxsw none --trace cuda,nvtx '
) if enable_nsys and mode.proc_rank == 0 else ''
# Build the command parts, only including trace if it's not empty
command_parts = []
prefix = mode.prefix.format(proc_rank=mode.proc_rank, proc_num=mode.proc_num)
if prefix:
command_parts.append(prefix)
if trace_command:
command_parts.append(trace_command)
command_parts.append(exec_command)
mode_command = ' '.join(command_parts)
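# Expose the process rank to the benchmark through the PROC_RANK environment variable.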
mode_command = f'PROC_RANK={mode.proc_rank} {mode_command}'
elif mode.name == 'torch.distributed':
# TODO: replace with torch.distributed.run in v1.9
# TODO: only supports node_num=1 and node_num=all currently
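# $NNODES, $NODE_RANK, $MASTER_ADDR and $MASTER_PORT are expected to be defined in the remote shell when launching across multiple nodes.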
torch_dist_params = (
'' if 'node_num' in mode and mode.node_num == 1 else
'--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
)
nsys_prefix = (
f'nsys profile --output {trace_dir}/{benchmark_name}_traces '
f'--backtrace none --sample none --force-overwrite true --cpuctxsw none --trace cuda,nvtx '
) if enable_nsys else ''
mode_command = (
f'{nsys_prefix}'
f'torchrun'
f' --no_python --nproc_per_node={mode.proc_num} {torch_dist_params}{exec_command}'
f' superbench.benchmarks.{benchmark_name}.parameters.distributed_impl=ddp'
f' superbench.benchmarks.{benchmark_name}.parameters.distributed_backend=nccl'
)
elif mode.name == 'mpi':
trace_command = (
f'nsys profile --output {trace_dir}/{benchmark_name}_{mode.proc_rank}_traces '
f'--backtrace none --sample none --force-overwrite true --cpuctxsw none --trace cuda,nvtx '
) if enable_nsys else ''
mode_command = (
'{trace} '
'mpirun ' # use default OpenMPI in image
'-tag-output ' # tag mpi output with [jobid,rank]<stdout/stderr> prefix
'-allow-run-as-root ' # allow mpirun to run when executed by root user
'{host_list} ' # use prepared hostfile or specify nodes and launch {proc_num} processes on each node
'-bind-to {bind_to} ' # bind processes according to mode config
'{mca_list} {env_list} {command}'
).format(
trace=trace_command,
host_list=f'-host localhost:{mode.proc_num}' if 'node_num' in mode and mode.node_num == 1 else
f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if 'host_list' not in mode else '-host ' +
','.join(f'{host}:{mode.proc_num}' for host in mode.host_list),
bind_to=mode.bind_to,
mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()),
env_list=' '.join(
f'-x {k}={str(v).format(proc_rank=mode.proc_rank, proc_num=mode.proc_num)}'
if isinstance(v, str) else f'-x {k}' for k, v in mode.env.items()
),
command=exec_command,
)
else:
logger.warning('Unknown mode %s.', mode.name)
return mode_command.strip()
def get_failure_count(self):
"""Get failure count during Ansible run.
Return:
int: Failure count.
"""
return self._ansible_client.failure_count
def deploy(self): # pragma: no cover
"""Deploy SuperBench environment."""
logger.info('Preparing SuperBench environment.')
extravars = {
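# Pick a random high port (16384-32767) to use as the SSH port for the deployed containers.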
'ssh_port': random.randint(1 << 14, (1 << 15) - 1),
'output_dir': str(self._output_path),
'docker_image': self._docker_config.image,
'docker_pull': bool(self._docker_config.pull),
}
if bool(self._docker_config.username) and bool(self._docker_config.password):
extravars.update(
{
'docker_registry': self._docker_config.registry,
'docker_username': self._docker_config.username,
'docker_password': self._docker_config.password,
}
)
self._ansible_client.run(self._ansible_client.get_playbook_config('deploy.yaml', extravars=extravars))
def run_sys_info(self):
"""Run the system info on all nodes."""
self.check_env()
logger.info('Runner is going to get node system info.')
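# Commands run inside the sb-workspace container by default; when docker is skipped they run directly on the host in $SB_WORKSPACE.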
fcmd = "docker exec sb-workspace bash -lc '{command}'"
if 'skip' not in self._docker_config:
self._docker_config.skip = False
if self._docker_config.skip:
fcmd = "bash -c 'cd $SB_WORKSPACE && {command}'"
ansible_runner_config = self._ansible_client.get_shell_config(
fcmd.format(command='sb node info --output-dir {output_dir}'.format(output_dir=self._sb_output_dir))
)
ansible_rc = self._ansible_client.run(ansible_runner_config, sudo=(not self._docker_config.skip))
if ansible_rc != 0:
self.cleanup()
self.fetch_results()
def check_env(self): # pragma: no cover
"""Check SuperBench environment."""
logger.info('Checking SuperBench environment.')
OmegaConf.save(config=self._sb_config, f=str(self._output_path / 'sb.config.yaml'))
self._ansible_client.run(
self._ansible_client.get_playbook_config(
'check_env.yaml',
extravars={
'no_docker': False if 'skip' not in self._docker_config else self._docker_config.skip,
'output_dir': str(self._output_path),
'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),
}
)
)
def cleanup(self): # pragma: no cover
"""Cleanup remaining processes on all nodes."""
self._ansible_client.run(self._ansible_client.get_playbook_config('cleanup.yaml'))
def fetch_results(self): # pragma: no cover
"""Fetch benchmark results on all nodes."""
try:
(self._output_path / 'nodes').mkdir(mode=0o755, parents=True, exist_ok=True)
except Exception:
logger.exception('Failed to create directory %s.', str(self._output_path / 'nodes'))
raise
self._ansible_client.run(
self._ansible_client.get_playbook_config(
'fetch_results.yaml',
extravars={
'sb_output_dir': self._sb_output_dir,
'absolute_output_dir': str(self._output_path),
}
)
)
def __signal_handler(self, signum, frame):
"""Signal handler for runner.
Args:
signum (int): Signal number.
frame (FrameType): Current stack frame.
"""
if signum == signal.SIGINT or signum == signal.SIGTERM:
logger.info('Killed by %s, exiting ...', signal.Signals(signum).name)
self.cleanup()
sys.exit(128 + signum)
def __create_results_summary(self): # pragma: no cover
"""Create the result summary file of all nodes."""
all_results = list()
for node_path in (self._output_path / 'nodes').glob('*'):
if not node_path.is_dir():
continue
results_summary = self.__create_single_node_summary(node_path)
results_summary['node'] = node_path.name
all_results.append(results_summary)
with (self._output_path / 'results-summary.jsonl').open(mode='w') as f:
for result in all_results:
json.dump(result, f)
f.write('\n')
def __create_single_node_summary(self, node_path): # pragma: no cover # noqa: C901
"""Create the result summary file of single node.
Args:
node_path (Path): The Path instance of node directory.
Returns:
dict: Result summary of single node.
"""
results_summary = dict()
reduce_ops = dict()
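# Natural-sort all per-benchmark results.json files so runs are merged in a stable order.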
file_list = [Path(f) for f in natsorted([str(f) for f in node_path.glob('**/results.json')])]
for results_file in file_list:
with results_file.open() as f:
try:
results = json.load(f)
except ValueError:
logger.error('Invalid JSON file: {}'.format(results_file))
continue
for result in results:
try:
benchmark_name = result['name']
except Exception:
logger.error('Invalid content in JSON file: {}'.format(results_file))
continue
if benchmark_name not in results_summary:
results_summary[benchmark_name] = defaultdict(list)
for metric in result['result']:
metric_name = '{}/{}'.format(benchmark_name, metric)
if metric_name not in reduce_ops:
reduce_ops[metric_name] = result['reduce_op'][metric]
elif reduce_ops[metric_name] != result['reduce_op'][metric]:
logger.error('Inconsistent reduce type for metric: {}'.format(metric_name))
continue
results_summary[benchmark_name][metric].append(result['result'][metric])
results_summary = self.__merge_benchmark_metrics(results_summary, reduce_ops)
monitor_summary = self.__merge_monitor_metrics(node_path)
results_summary = {**results_summary, **monitor_summary}
with (node_path / 'results-summary.json').open(mode='w') as f:
json.dump(results_summary, f, indent=2)
return results_summary
def __generate_metric_name(self, benchmark_name, metric, rank_count, run_count, curr_rank, curr_run):
"""Generate the summarized metrics name.
The format of metric name is:
{benchmark_name}/[{run_count}/]{metric_name}[:rank]
[run_count] and [rank] parts are optional.
Args:
benchmark_name (str): The benchmark name.
metric (str): The metric name.
rank_count (int): The total count of rank.
run_count (int): The total count of benchmarking.
curr_rank (int): The current rank index.
curr_run (int): The current run index.
Returns:
dict: Flattened result with metric as key.
"""
metric_name = benchmark_name
if run_count > 1:
metric_name = '{}/{}'.format(metric_name, curr_run)
metric_name = '{}/{}'.format(metric_name, metric)
if rank_count > 1:
metric_name = '{}:{}'.format(metric_name, curr_rank)
return metric_name
def __merge_benchmark_metrics(self, results_summary, reduce_ops):
"""Merge metrics of all benchmarks in one node.
Args:
results_summary (dict): Summarized result of one node.
reduce_ops (dict): The reduce type of each metric.
Returns:
dict: Flattened result with metric as key.
"""
metrics_summary = dict()
for benchmark_name in results_summary:
for metric in results_summary[benchmark_name]:
metric_name = '{}/{}'.format(benchmark_name, metric)
if metric_name not in reduce_ops or (
reduce_ops[metric_name] is not None and reduce_ops[metric_name] not in ReduceType.get_values()
):
logger.error('Unknown reduce type for metric: {}'.format(metric_name))
continue
if reduce_ops[metric_name] is not None:
reduce_func = Reducer.get_reduce_func(ReduceType(reduce_ops[metric_name]))
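# Each entry (typically one per rank) is a list of per-run values; zip(*...) regroups them by run so the reduce op collapses ranks for every run.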
values = [reduce_func(list(result)) for result in zip(*results_summary[benchmark_name][metric])]
for run in range(len(values)):
metric_name = self.__generate_metric_name(benchmark_name, metric, 1, len(values), 0, run)
metrics_summary[metric_name] = values[run]
else:
rank_count = len(results_summary[benchmark_name][metric])
for rank, rank_value in enumerate(results_summary[benchmark_name][metric]):
run_count = len(rank_value)
for run, run_value in enumerate(rank_value):
metric_name = self.__generate_metric_name(
benchmark_name, metric, rank_count, run_count, rank, run
)
metrics_summary[metric_name] = run_value
return metrics_summary
def __merge_monitor_metrics(self, node_path):
"""Merge and summarize monitor metrics of one node.
Args:
node_path (Path): The Path instance of node directory.
Returns:
dict: Flattened result with metric as key.
"""
metrics_summary = dict()
all_samples = list()
file_list = list(node_path.glob('**/monitor.jsonl'))
for results_file in file_list:
try:
with jsonlines.open(results_file) as reader:
all_samples = list(reader)
except BaseException as e:
logger.error('Invalid Jsonline file: {}, error message: {}'.format(results_file, str(e)))
continue
all_samples = sorted(all_samples, key=lambda k: k.get('time', '0'))
metrics_dict = dict()
for sample in all_samples:
for metric, value in sample.items():
if metric not in metrics_dict:
metrics_dict[metric] = list()
metrics_dict[metric].append(value)
for metric, values in metrics_dict.items():
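# The metric prefix (text before ':') selects the reduce op from MonitorRecord.reduce_ops.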
prefix = metric.split(':')[0]
for pattern, reduce_type in MonitorRecord.reduce_ops.items():
if pattern == prefix:
reduce_func = Reducer.get_reduce_func(reduce_type)
metric_name = 'monitor/{}'.format(metric)
metrics_summary[metric_name] = reduce_func(values)
continue
return metrics_summary
def _run_proc(self, benchmark_name, mode, vars):
"""Run the process.
Args:
benchmark_name (str): Benchmark name.
mode (DictConfig): Runner mode.
vars (dict): Process variables.
Returns:
int: Process return code.
"""
mode.update(vars)
if mode.name == 'mpi' and 'pattern' in mode:
mode.env.update({'SB_MODE_SERIAL_INDEX': mode.serial_index, 'SB_MODE_PARALLEL_INDEX': mode.parallel_index})
logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)
timeout = self._sb_benchmarks[benchmark_name].get('timeout', None)
if isinstance(timeout, int):
timeout = max(timeout, 60)
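# Environment is injected via docker's --env-file and -e flags, or by sourcing /tmp/sb.env and exporting variables when docker is skipped.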
env_list = '--env-file /tmp/sb.env'
if 'skip' not in self._docker_config:
self._docker_config.skip = False
if self._docker_config.skip:
env_list = 'set -o allexport && source /tmp/sb.env && set +o allexport'
for k, v in mode.env.items():
if isinstance(v, str):
envvar = f'{k}={str(v).format(proc_rank=mode.proc_rank, proc_num=mode.proc_num)}'
env_list += f' -e {envvar}' if not self._docker_config.skip else f' && export {envvar}'
fcmd = "docker exec {env_list} sb-workspace bash -lc '{command}'"
if self._docker_config.skip:
fcmd = "bash -c '{env_list} && cd $SB_WORKSPACE && {command}'"
ansible_runner_config = self._ansible_client.get_shell_config(
fcmd.format(env_list=env_list, command=self.__get_mode_command(benchmark_name, mode, timeout))
)
if mode.name == 'mpi' and 'node_num' in mode and mode.node_num != 1:
ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config)
if isinstance(timeout, int):
# we do not expect ansible itself to time out unless the subprocess hangs
ansible_runner_config['timeout'] = timeout + 60
# overwrite ansible runner's default signal handler with main process's
rc = self._ansible_client.run(
ansible_runner_config, cancel_callback=lambda: None, sudo=(not self._docker_config.skip)
)
return rc
def run(self):
"""Run the SuperBench benchmarks distributedly."""
self.check_env()
signal.signal(signal.SIGINT, self.__signal_handler)
signal.signal(signal.SIGTERM, self.__signal_handler)
for benchmark_name in self._sb_benchmarks:
if benchmark_name not in self._sb_enabled_benchmarks:
continue
benchmark_config = self._sb_benchmarks[benchmark_name]
for mode in benchmark_config.modes:
ansible_rc = 0
if mode.name == 'local':
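# Launch proc_num local processes, concurrently when mode.parallel is set, otherwise one at a time.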
rc_list = Parallel(n_jobs=mode.proc_num if mode.parallel else 1)(
delayed(self._run_proc)(benchmark_name, mode, {
'proc_rank': proc_rank
}) for proc_rank in range(mode.proc_num)
)
ansible_rc = sum(rc_list)
elif mode.name == 'torch.distributed' or mode.name == 'mpi':
if 'pattern' not in mode:
ansible_rc = self._run_proc(benchmark_name, mode, {'proc_rank': 0})
else:
if not os.path.exists(self._output_path / 'hostfile'):
logger.warning('No hostfile under %s.', self._output_path)
continue
with open(self._output_path / 'hostfile', 'r') as f:
host_list = f.read().splitlines()
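# The traffic pattern splits hosts into serial steps; host groups within a step run in parallel, one benchmark launch per group.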
host_groups = gen_traffic_pattern_host_groups(
host_list, mode.pattern, self._output_path / 'mpi_pattern.txt', benchmark_name
)
for serial_index, host_group in enumerate(host_groups):
para_rc_list = Parallel(n_jobs=len(host_group))(
delayed(self._run_proc)(
benchmark_name,
mode,
vars={
'proc_rank': 0,
'host_list': host_list,
'serial_index': str(serial_index),
'parallel_index': str(parallel_index),
}
) for parallel_index, host_list in enumerate(host_group)
)
ansible_rc = ansible_rc + sum(para_rc_list)
else:
logger.warning('Unknown mode %s.', mode.name)
if ansible_rc != 0:
self.cleanup()
self.fetch_results()
self.__create_results_summary()