Commit ea13fdb

Rename parallel_workers keyword argument to parallel
1 parent c960b1b

3 files changed: 11 additions & 11 deletions

doc/source/parallel.rst (4 additions & 4 deletions)
@@ -17,7 +17,7 @@ Parallel/remote tuning is implemented using `Ray <https://docs.ray.io/en/latest/
 How to use
 ----------
 
-To enable parallel tuning, pass the ``parallel_workers`` argument to ``tune_kernel``:
+To enable parallel tuning, pass the ``parallel`` argument to ``tune_kernel``:
 
 .. code-block:: python
 
@@ -27,11 +27,11 @@ To enable parallel tuning, pass the ``parallel_workers`` argument to ``tune_kern
         size,
         args,
         tune_params,
-        parallel_workers=True,
+        parallel=True,
     )
 
-If ``parallel_workers`` is set to ``True``, Kernel Tuner will use all available Ray workers for tuning.
-Alternatively, ``parallel_workers`` can be set to an integer ``n`` to use exactly ``n`` workers.
+If ``parallel`` is set to ``True``, Kernel Tuner will use all available Ray workers for tuning.
+Alternatively, ``parallel`` can be set to an integer ``n`` to use exactly ``n`` workers.
 
 
 Parallel tuning and optimization strategies
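For context, a minimal self-contained sketch of the integer-worker variant described in the updated documentation. It assumes a Kernel Tuner build that includes this change plus a working CUDA and Ray setup; the kernel, problem size, and search space follow the standard vector_add example and are illustrative, not part of this diff.

import numpy as np
from kernel_tuner import tune_kernel

# Minimal CUDA vector-add kernel, as used in Kernel Tuner's examples.
kernel_string = """
__global__ void vector_add(float *c, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

size = 10000000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(b)
args = [c, a, b, np.int32(size)]

tune_params = {"block_size_x": [32 * i for i in range(1, 33)]}

# parallel=True uses all available Ray workers; an integer requests exactly
# that many workers (2 here is purely illustrative).
results, env = tune_kernel("vector_add", kernel_string, size, args, tune_params, parallel=2)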

examples/cuda/vector_add_parallel.py (1 addition & 1 deletion)
@@ -26,7 +26,7 @@ def tune():
     tune_params = dict()
     tune_params["block_size_x"] = [32 * i for i in range(1, 33)]
 
-    results, env = tune_kernel("vector_add", kernel_string, size, args, tune_params, parallel_workers=True)
+    results, env = tune_kernel("vector_add", kernel_string, size, args, tune_params, parallel=True)
     print(env)
     return results
 

kernel_tuner/interface.py (6 additions & 6 deletions)
@@ -474,7 +474,7 @@ def __deepcopy__(self, _):
         ),
         ("metrics", ("specifies user-defined metrics, please see :ref:`metrics`.", "dict")),
         ("simulation_mode", ("Simulate an auto-tuning search from an existing cachefile", "bool")),
-        ("parallel_workers", ("Set to `True` or an integer to enable parallel tuning. If set to an integer, this will be the number of parallel workers.", "int|bool")),
+        ("parallel", ("Set to `True` or an integer to enable parallel tuning. If set to an integer, this will be the number of parallel workers.", "int|bool")),
         ("observers", ("""A list of Observers to use during tuning, please see :ref:`observers`.""", "list")),
     ]
 )
@@ -586,7 +586,7 @@ def tune_kernel(
     cache=None,
     metrics=None,
     simulation_mode=False,
-    parallel_workers=None,
+    parallel=None,
     observers=None,
     objective=None,
     objective_higher_is_better=None,
@@ -662,14 +662,14 @@ def tune_kernel(
     # TODO: we could use the "match case" syntax when removing support for 3.9
     tuning_options.simulated_time = 0
 
-    if parallel_workers and simulation_mode:
-        raise ValueError("Enabling `parallel_workers` and `simulation_mode` together is not supported")
+    if parallel and simulation_mode:
+        raise ValueError("Enabling `parallel` and `simulation_mode` together is not supported")
     elif simulation_mode:
         from kernel_tuner.runners.simulation import SimulationRunner
         runner = SimulationRunner(kernelsource, kernel_options, device_options, iterations, observers)
-    elif parallel_workers:
+    elif parallel:
         from kernel_tuner.runners.parallel import ParallelRunner
-        num_workers = None if parallel_workers is True else parallel_workers
+        num_workers = None if parallel is True else parallel
         runner = ParallelRunner(kernelsource, kernel_options, device_options, tuning_options, iterations, observers, num_workers=num_workers)
     else:
         from kernel_tuner.runners.sequential import SequentialRunner
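As a standalone sketch of what the renamed argument controls in tune_kernel: the guard against combining it with simulation mode, and how it is normalized into a worker count for ParallelRunner. resolve_num_workers is a hypothetical helper written only for illustration, not part of the Kernel Tuner API.

def resolve_num_workers(parallel, simulation_mode):
    # Hypothetical helper: mirrors the guard and normalization added to
    # tune_kernel in this commit.
    if parallel and simulation_mode:
        raise ValueError("Enabling `parallel` and `simulation_mode` together is not supported")
    # True means "use all available Ray workers" (num_workers=None);
    # an integer n requests exactly n workers.
    return None if parallel is True else parallel

# resolve_num_workers(True, False)  -> None  (all available workers)
# resolve_num_workers(4, False)     -> 4     (exactly four workers)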
