From b729a47e104d2154351755b33fbc29d93a7342a5 Mon Sep 17 00:00:00 2001 From: Adam Siemieniuk Date: Thu, 9 Apr 2026 10:36:07 +0200 Subject: [PATCH] [CI] Limit max line length Enables linter's E501 rule to enforce maximum line length. The rule is separate from the code formatter and primarily targets blocks of comments, strings, documentation etc. Ruff version is also bumped to allow more control over the linter such as disabling rules for blocks of code. Files are reformatted to adhere to the new linter rule. There is no auto fix available, offending cases have to be addressed manually. --- .pre-commit-config.yaml | 2 +- examples/cpu/x86/matmul.py | 3 ++- examples/feed-forward-mpi/feed-forward-mpi.py | 4 +++- .../ingress/convert-kernel-bench-to-mlir.py | 3 ++- examples/ingress/torch/mlp_from_file.py | 11 +++++---- examples/llama/ref_model.py | 7 +++--- examples/llama/test_llama3.py | 11 +++++---- ...sform_a_payload_according_to_a_schedule.py | 9 +++++--- examples/xegpu/mlp.py | 3 ++- lighthouse/dialects/transform_tune_ext.py | 6 +++-- lighthouse/execution/init.py | 6 +++-- lighthouse/execution/runner.py | 3 ++- lighthouse/ingress/mlir_gen/main.py | 3 ++- lighthouse/ingress/torch/importer.py | 11 +++++---- lighthouse/pipeline/descriptor.py | 9 +++++--- lighthouse/pipeline/driver.py | 23 ++++++++++++------- lighthouse/pipeline/stage.py | 23 ++++++++++++------- lighthouse/tune/trace.py | 8 +++++-- pyproject.toml | 6 +++-- tools/lh-opt | 5 ++-- tools/lh-run | 10 ++++---- 21 files changed, 106 insertions(+), 60 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1449b903..27c579c3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.14.5 + rev: v0.15.9 hooks: # Run the linter. 
- id: ruff-check diff --git a/examples/cpu/x86/matmul.py b/examples/cpu/x86/matmul.py index e0d4d65b..cec9d048 100644 --- a/examples/cpu/x86/matmul.py +++ b/examples/cpu/x86/matmul.py @@ -1,6 +1,7 @@ # RUN: %PYTHON %s --dump-kernel=vectorized | FileCheck %s # RUN: %PYTHON %s --dump-kernel=vectorized --tile-size=64 | FileCheck %s -# RUN: %PYTHON %s --dump-kernel=vectorized --dtype=bf16 --avx512 | FileCheck %s --check-prefix=AVX512 +# RUN: %PYTHON %s --dump-kernel=vectorized --dtype=bf16 --avx512 \ +# RUN: | FileCheck %s --check-prefix=AVX512 # CHECK: vector.broadcast # CHECK: vector.fma diff --git a/examples/feed-forward-mpi/feed-forward-mpi.py b/examples/feed-forward-mpi/feed-forward-mpi.py index 7622a6e9..07575d9f 100644 --- a/examples/feed-forward-mpi/feed-forward-mpi.py +++ b/examples/feed-forward-mpi/feed-forward-mpi.py @@ -108,7 +108,9 @@ def parse_cla(): type=int, default=[WORLD_SIZE], nargs="+", - help="The shape of the device grid (1 or 2 dimensions). The product of the grid dimensions must match the number of MPI ranks. Use '0' if 2d grid dimensions should be inferred automatically.", + help="The shape of the device grid (1 or 2 dimensions). The product of the grid dimensions \ + must match the number of MPI ranks. 
Use '0' if 2d grid dimensions should be inferred \ + automatically.", ) parser.add_argument( "--nruns", diff --git a/examples/ingress/convert-kernel-bench-to-mlir.py b/examples/ingress/convert-kernel-bench-to-mlir.py index efa1ffa1..d1499057 100755 --- a/examples/ingress/convert-kernel-bench-to-mlir.py +++ b/examples/ingress/convert-kernel-bench-to-mlir.py @@ -36,7 +36,7 @@ ) sys.exit(1) - +# ruff: disable[E501] # The following kernels won't get converted: level1, level2 = Path("level1"), Path("level2") ignore_list = [ @@ -121,6 +121,7 @@ level2 / "92_Conv2d_GroupNorm_Tanh_HardSwish_ResidualAdd_LogSumExp.py", # error: failed to legalize operation 'torch.constant.int' ] +# ruff: enable[E501] @dataclass diff --git a/examples/ingress/torch/mlp_from_file.py b/examples/ingress/torch/mlp_from_file.py index 748de85b..4bcdc0b2 100644 --- a/examples/ingress/torch/mlp_from_file.py +++ b/examples/ingress/torch/mlp_from_file.py @@ -5,10 +5,11 @@ without initializing the model class on the user's side. The script uses 'lighthouse.ingress.torch.import_from_file' function that -takes a path to a Python file containing the model definition (a Python class derived from 'nn.Module'), -along with the names of functions to get model init arguments and sample inputs. The function -imports the model class on its own, initializes it, and passes it to torch_mlir -to get a MLIR module in the specified dialect. +takes a path to a Python file containing the model definition (a Python class +derived from 'nn.Module'), along with the names of functions to get model init +arguments and sample inputs. The function imports the model class on its own, +initializes it, and passes it to torch_mlir to get a MLIR module in the specified +dialect. The script uses the model from 'MLPModel/model.py' as an example. 
""" @@ -39,7 +40,7 @@ model_path, # Path to the Python file containing the model model_class_name="MLPModel", # Name of the PyTorch nn.Module class to convert init_args_fn_name="get_init_inputs", # Function that returns args for model.__init__() - sample_args_fn_name="get_sample_inputs", # Function that returns sample inputs to pass to 'model(...)' + sample_args_fn_name="get_sample_inputs", # Function that returns sample inputs to pass to model dialect="linalg-on-tensors", # Target MLIR dialect (linalg ops on tensor types) ir_context=ir_context, # MLIR context for the conversion ) diff --git a/examples/llama/ref_model.py b/examples/llama/ref_model.py index 3845a8ab..8acf18f7 100644 --- a/examples/llama/ref_model.py +++ b/examples/llama/ref_model.py @@ -1,9 +1,10 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement. +# This software may be used and distributed in accordance with +# the terms of the Llama 3 Community License Agreement. -## This is a modified version of the LLaMA 3 model implementation. -## It doesn't use any FairScale components +# This is a modified version of the LLaMA 3 model implementation. 
+# It doesn't use any FairScale components import math as pymath from dataclasses import dataclass diff --git a/examples/llama/test_llama3.py b/examples/llama/test_llama3.py index 185e660a..913bdcdb 100644 --- a/examples/llama/test_llama3.py +++ b/examples/llama/test_llama3.py @@ -475,7 +475,8 @@ def get_rotary_emb( static_output_shape=xq_reshaped_shape, ) - # View xq as complex: (batch, seq_len, n_heads, head_dim//2, 2) -> (batch, seq_len, n_heads, head_dim//2) complex + # View xq as complex: (batch, seq_len, n_heads, head_dim//2, 2) + # -> (batch, seq_len, n_heads, head_dim//2) complex xq_complex_shape = [batch, seq_len, n_heads, head_dim // 2] xq_complex_uninit = tensor.empty(xq_complex_shape, ir.ComplexType.get(elty)) xq_complex = get_view_as_complex(xq_reshaped, xq_complex_uninit) @@ -757,7 +758,8 @@ def get_attention( # Compute attention scores: matmul(xq, keys.transpose(-2, -1)) # xq_transposed: (batch, n_heads, seq_len, head_dim) - # keys_transposed: (batch, n_heads, seq_len, head_dim) -> transpose to (batch, n_heads, head_dim, seq_len) + # keys_transposed: (batch, n_heads, seq_len, head_dim) -> transpose to + # (batch, n_heads, head_dim, seq_len) # scores: (batch, n_heads, seq_len, seq_len) scores_shape = [batch, n_heads, seq_len, seq_len] scores_uninit = tensor.empty(scores_shape, elty) @@ -964,8 +966,9 @@ def get_transformer( get_outer: torch.outer, get_linear: torch.nn.functional.linear, get_repeat_kv: repeat_kv, - get_l2_norm: lambda x, eps: x - * torch.rsqrt(torch.mean(x.pow(2), dim=-1, keepdim=True) + eps), + get_l2_norm: lambda x, eps: ( + x * torch.rsqrt(torch.mean(x.pow(2), dim=-1, keepdim=True) + eps) + ), get_rotary_emb: apply_rotary_emb, } diff --git a/examples/schedule/transform_a_payload_according_to_a_schedule.py b/examples/schedule/transform_a_payload_according_to_a_schedule.py index fcbbe7b7..6bf61b33 100644 --- a/examples/schedule/transform_a_payload_according_to_a_schedule.py +++ 
b/examples/schedule/transform_a_payload_according_to_a_schedule.py @@ -29,16 +29,19 @@ def example_payload() -> Module: # NB: Do the CHECKing on the transformed output: # CHECK-LABEL: result of applying schedule to payload # CHECK: func.func @fold_add_on_two_matmuls - # CHECK-SAME: (%[[MATRIX_A:.*]]: {{.*}}, %[[MATRIX_B:.*]]: {{.*}}, %[[WEIGHTS:.*]]: {{.*}}) + # CHECK-SAME: (%[[MATRIX_A:.*]]: {{.*}}, %[[MATRIX_B:.*]]: {{.*}}, + # CHECK-SAME: %[[WEIGHTS:.*]]: {{.*}}) @func.func(matrixType, matrixType, matrixType) def fold_add_on_two_matmuls(matrixA, matrixB, weights): empty = tensor.empty(matrixType.shape, matrixType.element_type) c0 = arith.constant(F32Type.get(), 0.0) # CHECK: %[[ZERO_INIT:.*]] = linalg.fill zero_init = linalg.fill(c0, outs=[empty]) - # CHECK: %[[A_X_WEIGHTS:.*]] = linalg.matmul ins(%[[MATRIX_A]], %[[WEIGHTS]]{{.*}}) outs(%[[ZERO_INIT]] + # CHECK: %[[A_X_WEIGHTS:.*]] = linalg.matmul ins(%[[MATRIX_A]], %[[WEIGHTS]] + # CHECK-SAME: outs(%[[ZERO_INIT]] A_x_weights = linalg.matmul(matrixA, weights, outs=[zero_init]) - # CHECK: %[[RES:.*]] = linalg.matmul ins(%[[MATRIX_B]], %[[WEIGHTS]]{{.*}}) outs(%[[A_X_WEIGHTS]] + # CHECK: %[[RES:.*]] = linalg.matmul ins(%[[MATRIX_B]], %[[WEIGHTS]] + # CHECK-SAME: outs(%[[A_X_WEIGHTS]] B_x_weights = linalg.matmul(matrixB, weights, outs=[zero_init]) # CHECK-NOT: linalg.add added = linalg.add(A_x_weights, B_x_weights, outs=[empty]) diff --git a/examples/xegpu/mlp.py b/examples/xegpu/mlp.py index 480e1f50..96854859 100644 --- a/examples/xegpu/mlp.py +++ b/examples/xegpu/mlp.py @@ -3,7 +3,8 @@ # RUN: %PYTHON %s --dump-kernel=xegpu-wg --hidden-sizes 1024 1024 --relu | FileCheck %s # RUN: %PYTHON %s --dump-kernel=xegpu-wg --hidden-sizes 1024 1024 --bias | FileCheck %s # RUN: %PYTHON %s --dump-kernel=xegpu-wg --hidden-sizes 1024 1024 --accumulate-c | FileCheck %s -# RUN: %PYTHON %s --dump-kernel=xegpu-wg --hidden-sizes 1024 1024 --bias --relu --accumulate-c | FileCheck %s +# RUN: %PYTHON %s --dump-kernel=xegpu-wg 
--hidden-sizes 1024 1024 --bias --relu --accumulate-c \ +# RUN: | FileCheck %s # CHECK: module attributes {gpu.container_module} { """ diff --git a/lighthouse/dialects/transform_tune_ext.py b/lighthouse/dialects/transform_tune_ext.py index ff14f2a5..dd6cb5de 100644 --- a/lighthouse/dialects/transform_tune_ext.py +++ b/lighthouse/dialects/transform_tune_ext.py @@ -82,7 +82,8 @@ def wrapper(*args, **kwargs): func_def_ast = func_ast.body[0] # TODO: in case of multiple decorators, remove just @KnobValue.ast_rewrite - func_def_ast.decorator_list.clear() # Remove the decorator to avoid infinite recursion. + # Remove the decorator to avoid infinite recursion. + func_def_ast.decorator_list.clear() if in_exprs: # Apply the rewriting of `in` expressions. func_def_ast.body = [ @@ -97,7 +98,8 @@ def wrapper(*args, **kwargs): mod = compile(func_ast, filename=source_file, mode="exec") frame = inspect.currentframe() assert frame and frame.f_back - # Make the original function's globals and locals available to the rewritten function. + # Make the original function's globals and locals available + # to the rewritten function. temp_globals = frame.f_back.f_globals.copy() temp_globals |= frame.f_back.f_locals.copy() temp_locals = frame.f_back.f_locals.copy() diff --git a/lighthouse/execution/init.py b/lighthouse/execution/init.py index 4835fb01..2342ebcb 100644 --- a/lighthouse/execution/init.py +++ b/lighthouse/execution/init.py @@ -27,11 +27,13 @@ class KernelArgument: """ A kernel argument, initialized according to the specified type. The argument value is stored in the `arg` attribute, which is a numpy array. - It will be initialized at construction time, so that the argument value is ready to use after construction. + It will be initialized at construction time, so that the argument value is ready + to use after construction. 
Arguments are: * dims: list of dimensions of the argument > 0 (e.g., [M, N, K]) - * element_type: NumPy data type of the argument (e.g., np.float32, np.int64, "f16", "bf16", etc.) + * element_type: NumPy data type of the argument + (e.g., np.float32, np.int64, "f16", "bf16", etc.) * init_type: type of initialization (InitType) TODO: Add support for distribution parameters on random. """ diff --git a/lighthouse/execution/runner.py b/lighthouse/execution/runner.py index b8ae4d61..c17f5588 100644 --- a/lighthouse/execution/runner.py +++ b/lighthouse/execution/runner.py @@ -82,7 +82,8 @@ def _find_shared_libs(self, shared_libs: list[str]) -> list[str]: def _get_engine(self) -> ExecutionEngine: """ - Get an execution engine for the given payload module, loading the necessary shared libraries. + Get an execution engine for the given payload module, + loading the necessary shared libraries. """ execution_engine = ExecutionEngine( self.payload, opt_level=self.opt_level, shared_libs=self.shared_libs diff --git a/lighthouse/ingress/mlir_gen/main.py b/lighthouse/ingress/mlir_gen/main.py index 4bddf1d5..ea522fea 100644 --- a/lighthouse/ingress/mlir_gen/main.py +++ b/lighthouse/ingress/mlir_gen/main.py @@ -69,7 +69,8 @@ def csints(s: str) -> Sequence[int]: "--layers", type=csints, default=(128, 256, 512), - help="the number of neurons in each layer - the first layer is the input layer and the last layer is the output layer", + help="the number of neurons in each layer - the first layer is the input layer \ + and the last layer is the output layer", ) parser.add_argument( diff --git a/lighthouse/ingress/torch/importer.py b/lighthouse/ingress/torch/importer.py index f95808ff..dc281d83 100644 --- a/lighthouse/ingress/torch/importer.py +++ b/lighthouse/ingress/torch/importer.py @@ -1,4 +1,3 @@ -import importlib import importlib.util from pathlib import Path from typing import Iterable, Mapping @@ -52,10 +51,11 @@ def import_from_model( ``OutputType.LINALG_ON_TENSORS``. 
ir_context (ir.Context, optional): An optional MLIR context to use for parsing the module. If not provided, the module is returned as a string. - **kwargs: Additional keyword arguments passed to the ``torch_mlir.fx.export_and_import`` function. + **kwargs: Additional keyword arguments passed to ``torch_mlir.fx.export_and_import``. Returns: - str | ir.Module: The imported MLIR module as a string or an ir.Module if `ir_context` is provided. + str | ir.Module: The imported MLIR module as a string or an ir.Module + if `ir_context` is provided. Examples: >>> import torch @@ -142,10 +142,11 @@ def import_from_file( ``OutputType.LINALG_ON_TENSORS``. ir_context (ir.Context, optional): An optional MLIR context to use for parsing the module. If not provided, the module is returned as a string. - **kwargs: Additional keyword arguments passed to the ``torch_mlir.fx.export_and_import`` function. + **kwargs: Additional keyword arguments passed to ``torch_mlir.fx.export_and_import``. Returns: - str | ir.Module: The imported MLIR module as a string or an ir.Module if `ir_context` is provided. + str | ir.Module: The imported MLIR module as a string or an ir.Module + if `ir_context` is provided. Examples: Given a file `path/to/model_file.py` with the following content: diff --git a/lighthouse/pipeline/descriptor.py b/lighthouse/pipeline/descriptor.py index 48d231e7..b2c1c193 100644 --- a/lighthouse/pipeline/descriptor.py +++ b/lighthouse/pipeline/descriptor.py @@ -38,8 +38,10 @@ def __init__(self, filename: str): def _normalize_include_path(self, filename) -> str: """ Finds the file in some standard locations, in order: - * The path of the descriptor file that includes it. This allows for relative includes. - * The path of the Lighthouse schedule module, where all the standard pipelines are located. + * The path of the descriptor file that includes it. + This allows for relative includes.
+ * The path of the Lighthouse schedule module, + where all the standard pipelines are located. """ filename = remove_args_and_opts(filename) descriptor_path = os.path.normpath(os.path.dirname(self.filename)) @@ -93,7 +95,8 @@ def _parse_stages(self) -> None: else: raise ValueError( - f"Invalid stage in pipeline description: {stage}. Must be one of 'pass', 'transform', 'bundle' or 'include'." + f"Invalid stage in pipeline description: {stage}. " + "Must be one of 'pass', 'transform', 'bundle' or 'include'." ) def _include_pipeline(self, filename: str) -> None: diff --git a/lighthouse/pipeline/driver.py b/lighthouse/pipeline/driver.py index f0071a0b..4f2d2575 100644 --- a/lighthouse/pipeline/driver.py +++ b/lighthouse/pipeline/driver.py @@ -9,7 +9,8 @@ class PipelineDriver: """ A simple driver that runs the optimization pipeline on a given workload. - Helps create a list of Stages (passes, transforms, bundles) to apply to the module, and runs them in sequence. + Helps create a list of Stages (passes, transforms, bundles) to apply to the module, + and runs them in sequence. """ stages: list[lhs.Stage] @@ -24,7 +25,7 @@ def add_pass(self, name: str) -> None: self.stages.append(lhs.PassStage([lhs.Pass(name)], self.context)) def add_transform(self, stage: str | ir.Module) -> None: - # Transform will figure out if this is MLIR, Python or Module, and will handle it accordingly. + # Transform will figure out if it is MLIR, Python or Module, and will handle it accordingly. if isinstance(stage, ir.Module): # This is a transform already in module form. Assume it has been verified already. if stage.context != self.context: @@ -43,7 +44,8 @@ def add_bundle(self, name: str) -> None: def add_stage(self, stage: lhs.Stage) -> None: # A generit stage that isn't covered by the existing infrastructure. - # Users can derive their own classes from Stage and add them to the pipeline with this method. 
+ # Users can derive their own classes from Stage and add them to + # the pipeline with this method. self.stages.append(stage) def apply(self, module: ir.Module, print_after_all: bool = False) -> ir.Module: @@ -65,7 +67,8 @@ def reset(self): class TransformDriver(PipelineDriver): """ A simple driver that runs a sequence of transform modules on a given workload. - This is a thin wrapper around PipelineDriver that is used to run a sequence of transform modules on a given workload. + This is a thin wrapper around PipelineDriver that is used to run + a sequence of transform modules on a given workload. """ def __init__(self, schedules: list[ir.Module]): @@ -85,14 +88,18 @@ class CompilerDriver: This is a high-level interface that abstracts away the details of the optimization pipeline, and provides a simple interface for running the pipeline on a given workload. - The pipeline is flexible until the first time it is run, at which point it becomes fixed and cannot be modified until reset is called. - This is to allow running the same pipeline on different modules, without accidentally modifying the pipeline after it has been run. + The pipeline is flexible until the first time it is run, at which point it becomes fixed and + cannot be modified until reset is called. + This is to allow running the same pipeline on different modules, without accidentally modifying + the pipeline after it has been run. - Calling reset() will clear the pipeline and the module, allowing for a new pipeline to be constructed and run on a new module. + Calling reset() will clear the pipeline and the module, allowing for a new pipeline + to be constructed and run on a new module. """ def __init__(self, filename: str, stages: list[str] = []): - # The context is shared across the entire pipeline, and is used to create the PassManager and Transform Schedules. + # The context is shared across the entire pipeline, and is used to create the PassManager + # and Transform Schedules. 
# The module is owned by the Driver to encapsulate its use through the pipeline. # It is returned at the end of run() after being transformed by the stages in the pipeline. self.context = ir.Context() diff --git a/lighthouse/pipeline/stage.py b/lighthouse/pipeline/stage.py index 492c56d4..b7299191 100644 --- a/lighthouse/pipeline/stage.py +++ b/lighthouse/pipeline/stage.py @@ -32,7 +32,8 @@ def __str__(self) -> str: # Predefined pass bundles for common transformations. # These are not exhaustive and can be extended as needed. # The idea is to group together passes that are commonly used together in a pipeline, -# so that they can be easily added to a PassManager or Transform Schedule with a single function call. +# so that they can be easily added to a PassManager or Transform Schedule with +# a single function call. # FIXME: Deprecate bundles in favor of YAML pipeline descriptors. PassBundles = { # All in one bufferization bundle. @@ -127,21 +128,24 @@ def __str__(self) -> str: class Stage: """ - A stage in the optimization pipeline. Each stage will apply a specific set of transformations to the module, - and will keep track of the current state of the module after the transformations are applied. + A stage in the optimization pipeline. Each stage will apply a specific + set of transformations to the module, and will keep track of the current + state of the module after the transformations are applied. """ @abstractmethod def apply(self, module: ir.Module) -> ir.Module: """ - Apply the transformations for this stage to the given module, and return the transformed module. + Apply the transformations for this stage to the given module, + and return the transformed module. """ pass class PassStage(Stage): """ - A stage that applies a predefined set of passes to the module. This is a simple wrapper around a PassManager. + A stage that applies a predefined set of passes to the module. + This is a simple wrapper around a PassManager. 
""" def __init__(self, passes: list[Pass], context: ir.Context): @@ -197,11 +201,13 @@ def __init__(self, transform: Transform | ir.Module, context: ir.Context): spec.loader.exec_module(transform_module) if not hasattr(transform_module, transform.generator): raise ValueError( - f"Transform module '{transform.filename}' does not define a '{transform.generator}' generator function." + f"Transform module '{transform.filename}' does not define \ + a '{transform.generator}' generator function." ) self.generator = getattr(transform_module, transform.generator) - # Run the function with the dictionary as the options that will create the named sequence. + # Run the function with the dictionary as the options + # that will create the named sequence. with context, ir.Location.unknown(): self.module = self.generator(transform.options) else: @@ -210,7 +216,8 @@ def __init__(self, transform: Transform | ir.Module, context: ir.Context): # Check if the imported module contains at least one schedule if TransformStage.MLIR_ATTRIBUTE not in self.module.operation.attributes: raise ValueError( - f"Transform module {transform.filename} does not define a {TransformStage.MLIR_ATTRIBUTE} attribute." + f"Transform module {transform.filename} does not define \ + a {TransformStage.MLIR_ATTRIBUTE} attribute." ) # Assume the first (or only) sequence. 
diff --git a/lighthouse/tune/trace.py b/lighthouse/tune/trace.py index 203653ef..020dd2f2 100644 --- a/lighthouse/tune/trace.py +++ b/lighthouse/tune/trace.py @@ -86,8 +86,12 @@ def possibilities(self) -> Generator[int, None, None]: if self.options is not None: if self.divides is not None or self.divisible_by is not None: yield from filter( - lambda val: (self.divides is None or (self.divides % val == 0)) - and (self.divisible_by is None or (val % self.divisible_by == 0)), + lambda val: ( + (self.divides is None or (self.divides % val == 0)) + and ( + self.divisible_by is None or (val % self.divisible_by == 0) + ) + ), self.options, ) else: diff --git a/pyproject.toml b/pyproject.toml index ad039d4e..58b2e47c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ dependencies = [ [dependency-groups] dev = [ "lit==18.1.8", # Tool to configure, discover and run tests - "ruff==0.14.5", # Python linter and formatter + "ruff==0.15.9", # Python linter and formatter "pre-commit", # Tool to manage and apply pre-commit hooks "pytest>=8.0.0", "psutil", @@ -121,7 +121,9 @@ select = [ "W", # Warning ] ignore = [ - "E501", # line-too-long "PERF203", # try-except-in-loop "PERF401", # manual-list-comprehension ] + +[tool.ruff.lint.pycodestyle] +max-line-length=100 # limit total line length covering comments, docs etc. diff --git a/tools/lh-opt b/tools/lh-opt index 18354471..668091a3 100755 --- a/tools/lh-opt +++ b/tools/lh-opt @@ -8,8 +8,9 @@ from lighthouse.pipeline.driver import CompilerDriver if __name__ == "__main__": Parser = argparse.ArgumentParser( description=""" - Lighthouse Optimization Pipeline: Applies a series of transformations to an input MLIR module, - and produces an optimized MLIR module as output. The transformations are applied in argument order. + Lighthouse Optimization Pipeline: Applies a series of transformations to + an input MLIR module, and produces an optimized MLIR module as output. + The transformations are applied in argument order. 
The names of the passes are registered by the driver. """ ) diff --git a/tools/lh-run b/tools/lh-run index 058dfc8c..7ecc89e7 100755 --- a/tools/lh-run +++ b/tools/lh-run @@ -32,8 +32,8 @@ def add_lower_to_llvm_stages() -> list[str]: if __name__ == "__main__": Parser = argparse.ArgumentParser( description=""" - Lighthouse Optimization Runner: Optionally applies a series of transformations to an input MLIR module, - and executes it on a device. + Lighthouse Optimization Runner: Optionally applies a series of transformations + to an input MLIR module, and executes it on a device. """ ) Parser.add_argument( @@ -118,7 +118,8 @@ if __name__ == "__main__": # Add the remaining stages defined by the user. driver.add_stages(args.stage) - # Add the necessary LLVM lowering stages to ensure the module can be executed by the ExecutionEngine. + # Add the necessary LLVM lowering stages to ensure + # the module can be executed by the ExecutionEngine. driver.add_stages(add_lower_to_llvm_stages()) # Run the pipeline to get the optimized module. @@ -127,7 +128,8 @@ if __name__ == "__main__": print(optimized_module) # Initialize the device data - # TODO: Allow automatic inspection of the payload module if the user doesn't provide input shapes. + # TODO: Allow automatic inspection of the payload module + # if the user doesn't provide input shapes. buffers = [] for shape in args.input_shape: for sub_shape in shape.split(","):