Skip to content

Bump aiohttp from 3.13.3 to 3.13.4 #141

Bump aiohttp from 3.13.3 to 3.13.4

Bump aiohttp from 3.13.3 to 3.13.4 #141

Workflow file for this run

---
# CI workflow: pre-commit checks, the test matrix, and a benchmark canary.
name: tests
# Run on pushes to main and on PRs targeting main.
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  # Lint/format gate: run all pre-commit hooks against the repo.
  pre_commit_checks:
    runs-on: ubuntu-24.04
    steps:
      # checks out the repo
      - uses: actions/checkout@v6
      # run pre-commit (needs a Python toolchain and uv on PATH)
      - name: Python setup
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"
      - name: Install the latest version of uv
        uses: astral-sh/setup-uv@v7
      - uses: pre-commit/action@v3.0.1
      # run pre-commit ci lite for automated fixes; runs even when the
      # hook step above failed (but not when the job was cancelled)
      - uses: pre-commit-ci/lite-action@v1.1.0
        if: ${{ !cancelled() }}
run_tests:
strategy:
matrix:
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
os: [ubuntu-24.04, macos-14]
runs-on: ${{ matrix.os }}
env:
OS: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Python setup
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python_version }}
- name: Install the latest version of uv
uses: astral-sh/setup-uv@v7
- name: Sync dependencies (with viz + dlpack extras)
run: uv sync --frozen --extra viz --extra dlpack
- name: Run pytest
run: uv run --frozen pytest
benchmark_canary:
runs-on: ubuntu-24.04
env:
BENCH_ENFORCE: "0"
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Python setup
uses: actions/setup-python@v6
with:
python-version: "3.11"
- name: Install the latest version of uv
uses: astral-sh/setup-uv@v7
- name: Sync dependencies
run: uv sync --frozen
- name: Run lazy tensor canary benchmark
run: |
extra_args=""
if [ "${BENCH_ENFORCE}" = "1" ]; then
extra_args="--fail-on-regression"
fi
uv run --frozen python benchmarks/benchmark_lazy_tensor.py \
--repeats 7 \
--warmup 2 \
--baseline-json benchmarks/ci-baseline.json \
--regression-factor 1.5 \
--absolute-slack-ms 5.0 \
--json-out benchmark-results.json \
${extra_args}
- name: Write benchmark summary
run: |
python - <<'PY'
import json
from pathlib import Path
payload = json.loads(Path("benchmark-results.json").read_text())
lines = [
"## Lazy Tensor Benchmark Canary",
"",
f"- repeats: `{payload['repeats']}`",
f"- warmup: `{payload['warmup']}`",
"",
"| case | median (ms) | min (ms) | max (ms) |",
"|---|---:|---:|---:|",
]
for r in payload["results"]:
lines.append(
"| "
f"{r['name']} | {r['median_ms']:.2f} "
f"| {r['min_ms']:.2f} | {r['max_ms']:.2f} |"
)
lines.extend(
[
"",
"| case | baseline (ms) | threshold (ms) | status |",
"|---|---:|---:|---|",
]
)
for c in payload["regression_checks"]:
baseline = (
"-"
if c["baseline_ms"] is None
else f"{c['baseline_ms']:.2f}"
)
threshold = (
"-"
if c["threshold_ms"] is None
else f"{c['threshold_ms']:.2f}"
)
status = "regressed" if c["regressed"] else "ok"
lines.append(
f"| {c['name']} | {baseline} | "
f"{threshold} | {status} |"
)
summary_path = Path(__import__("os").environ["GITHUB_STEP_SUMMARY"])
summary_path.write_text("\n".join(lines) + "\n")
PY
- name: Upload benchmark artifact
uses: actions/upload-artifact@v7
with:
name: lazy-tensor-benchmark-${{ github.run_id }}
path: benchmark-results.json