From ecc34d7c89fe818d7446592d6695edd48890150a Mon Sep 17 00:00:00 2001 From: GokhanKabar Date: Sat, 14 Mar 2026 01:05:38 +0100 Subject: [PATCH 001/204] fix(pytest): -q mode summary line not detected, reports "No tests collected" pytest -q emits the final summary without === wrappers. The parser only matched === prefixed lines, leaving summary_line empty and triggering the wrong "No tests collected" message. - detect bare summary lines in quiet mode (no === wrapper) - fix false "No tests collected" when only skipped tests exist - add 2 tests covering both cases Fixes #565 Signed-off-by: GokhanKabar --- CHANGELOG.md | 1 + src/cmds/python/pytest_cmd.rs | 66 +++++++++++++++++++++++++++++++++-- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e98489a2e..767a7b12d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Bug Fixes +* **pytest:** fix `rtk pytest -q` incorrectly reporting "No tests collected" when tests ran ([#565](https://github.com/rtk-ai/rtk/issues/565)) — quiet mode summary line (no `===` wrapper) was not captured by the parser, causing `parse_summary_line("")` to return `(0, 0, 0)` and trigger the wrong message. Also fix false "No tests collected" when only skipped tests exist. 
* **diff:** correct truncation overflow count in condense_unified_diff ([#833](https://github.com/rtk-ai/rtk/pull/833)) ([5399f83](https://github.com/rtk-ai/rtk/commit/5399f83)) * **git:** replace vague truncation markers with exact counts in log and grep output ([#833](https://github.com/rtk-ai/rtk/pull/833)) ([185fb97](https://github.com/rtk-ai/rtk/commit/185fb97)) diff --git a/src/cmds/python/pytest_cmd.rs b/src/cmds/python/pytest_cmd.rs index 412acf9c7..cc568cbfc 100644 --- a/src/cmds/python/pytest_cmd.rs +++ b/src/cmds/python/pytest_cmd.rs @@ -111,7 +111,21 @@ fn filter_pytest_output(output: &str) -> String { } continue; } else if trimmed.starts_with("===") - && (trimmed.contains("passed") || trimmed.contains("failed")) + && (trimmed.contains("passed") + || trimmed.contains("failed") + || trimmed.contains("skipped")) + { + summary_line = trimmed.to_string(); + continue; + // quiet mode (-q): bare summary without === wrapper, e.g. "5 failed, 1698 passed, 2 skipped in 108.89s" + } else if summary_line.is_empty() + && !trimmed.starts_with("===") + && !trimmed.starts_with("FAILED") + && !trimmed.starts_with("ERROR") + && (trimmed.contains(" passed") + || trimmed.contains(" failed") + || trimmed.contains(" skipped")) + && trimmed.contains(" in ") { summary_line = trimmed.to_string(); continue; @@ -172,7 +186,7 @@ fn build_pytest_summary(summary: &str, _test_files: &[String], failures: &[Strin return format!("Pytest: {} passed", passed); } - if passed == 0 && failed == 0 { + if passed == 0 && failed == 0 && skipped == 0 { return "Pytest: No tests collected".to_string(); } @@ -370,4 +384,52 @@ collected 0 items (3, 1, 2) ); } + + #[test] + fn test_filter_pytest_quiet_mode_failures() { + // In -q mode, the final summary line has NO === wrapper + // This was causing "No tests collected" to be reported incorrectly + let output = r#"=== test session starts === +platform linux -- Python 3.12.11, pytest-8.1.0 +collected 1705 items + +.......F....... 
+ +=== FAILURES === +___ test_something ___ + +E AssertionError: expected True + +=== short test summary info === +FAILED tests/test_foo.py::test_something - AssertionError +5 failed, 1698 passed, 2 skipped in 108.89s"#; + + let result = filter_pytest_output(output); + assert!( + !result.contains("No tests collected"), + "Should not report 'No tests collected' when tests ran. Got: {}", + result + ); + assert!( + result.contains("1698") || result.contains("5 failed"), + "Should show actual test counts. Got: {}", + result + ); + } + + #[test] + fn test_filter_pytest_only_skipped() { + // If only skipped tests, should NOT say "No tests collected" + let output = r#"=== test session starts === +collected 3 items + +=== 3 skipped in 0.10s ==="#; + + let result = filter_pytest_output(output); + assert!( + !result.contains("No tests collected"), + "Should not say 'No tests collected' when tests were skipped. Got: {}", + result + ); + } } From 08d1abc4a842fc3af73a750414b069ef159afc34 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Thu, 12 Mar 2026 06:13:15 +0000 Subject: [PATCH 002/204] warn: emit stderr warning when RTK_DISABLED=1 is detected MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the rewrite hook sees RTK_DISABLED=1, it now prints a warning to stderr before skipping the rewrite. This educates AI agents to stop overusing the bypass — a real session showed 30% of commands using RTK_DISABLED=1 unnecessarily, dropping savings from ~65% to 48%. The warning is on stderr (not stdout) so it doesn't affect command output or piping. The bypass still works exactly as before. 
Fixes #508 Co-Authored-By: Claude Opus 4.6 Signed-off-by: Ousama Ben Younes --- src/discover/registry.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index cc975b3b9..cf18dbecb 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -638,7 +638,12 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option { let cmd_clean = stripped_cow.trim(); // #345: RTK_DISABLED=1 in env prefix → skip rewrite entirely + // #508: warn on stderr so agents learn to stop overusing it if has_rtk_disabled_prefix(cmd_part) { + eprintln!( + "[rtk] RTK_DISABLED=1 detected — skipping filter for this command. \ + Remove RTK_DISABLED=1 to restore token savings." + ); return None; } From 4ce95a321f78c63972527e9dd43f320d5c4f57f9 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Thu, 12 Mar 2026 06:19:16 +0000 Subject: [PATCH 003/204] test: add subprocess test for RTK_DISABLED=1 stderr warning Verifies that `rtk rewrite "RTK_DISABLED=1 git status"` emits the warning on stderr, not just that rewrite_command returns None. 
Co-Authored-By: Claude Opus 4.6 Signed-off-by: Ousama Ben Younes --- src/discover/registry.rs | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index cf18dbecb..c14a5dd32 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -1270,6 +1270,38 @@ mod tests { ); } + #[test] + fn test_rewrite_rtk_disabled_warns_on_stderr() { + // RTK_DISABLED=1 should still return None (no rewrite) + // and emit a warning on stderr (tested via subprocess below) + assert_eq!(rewrite_command("RTK_DISABLED=1 git status", &[]), None); + + // Verify warning via subprocess: `rtk rewrite "RTK_DISABLED=1 git status"` + // should exit non-zero AND print warning to stderr + let rtk_bin = std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .join("target") + .join("debug") + .join("rtk"); + if !rtk_bin.exists() { + return; // Binary not built — skip subprocess check + } + let output = std::process::Command::new(&rtk_bin) + .args(["rewrite", "RTK_DISABLED=1 git status"]) + .output() + .expect("Failed to run rtk"); + + assert!( + !output.status.success(), + "Should exit non-zero (no rewrite)" + ); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("RTK_DISABLED=1 detected"), + "Should warn on stderr, got: {}", + stderr + ); + } + #[test] fn test_rewrite_non_rtk_disabled_env_still_rewrites() { assert_eq!( From 097456e525bc8b635faca8fd505957e1df0ba8f0 Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Sat, 28 Mar 2026 01:22:38 +0100 Subject: [PATCH 004/204] fix(json): rename --schema to --keys-only for clearer opt-in semantics The previous --schema flag name implied output type rather than behavior. --keys-only is explicit: values are shown by default, this flag strips them. Closes #621 (json values now preserved by default; --keys-only is opt-in). 
Signed-off-by: Florian BRUNIAUX --- src/cmds/system/json_cmd.rs | 2 +- src/main.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/cmds/system/json_cmd.rs b/src/cmds/system/json_cmd.rs index 4e887417e..176b6e568 100644 --- a/src/cmds/system/json_cmd.rs +++ b/src/cmds/system/json_cmd.rs @@ -35,7 +35,7 @@ fn validate_json_extension(file: &Path) -> Result<()> { Ok(()) } -/// Show JSON (compact with values, or schema-only with --schema) +/// Show JSON (compact with values by default, or keys-only with --keys-only) pub fn run(file: &Path, max_depth: usize, schema_only: bool, verbose: u8) -> Result<()> { validate_json_extension(file)?; let timer = tracking::TimedExecution::start(); diff --git a/src/main.rs b/src/main.rs index 50a39ce52..a6baba01d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -195,16 +195,16 @@ enum Commands { command: Vec, }, - /// Show JSON (compact values, or schema-only with --schema) + /// Show JSON (compact values by default, or keys-only with --keys-only) Json { /// JSON file file: PathBuf, /// Max depth #[arg(short, long, default_value = "5")] depth: usize, - /// Show structure only (strip all values) + /// Show keys only (strip all values, show structure) #[arg(long)] - schema: bool, + keys_only: bool, }, /// Summarize project dependencies @@ -1494,12 +1494,12 @@ fn main() -> Result<()> { Commands::Json { file, depth, - schema, + keys_only, } => { if file == Path::new("-") { - json_cmd::run_stdin(depth, schema, cli.verbose)?; + json_cmd::run_stdin(depth, keys_only, cli.verbose)?; } else { - json_cmd::run(&file, depth, schema, cli.verbose)?; + json_cmd::run(&file, depth, keys_only, cli.verbose)?; } } From 2a2118d98bb52b4d1f0e26cdc880312377d72dee Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Sat, 28 Mar 2026 00:33:36 +0100 Subject: [PATCH 005/204] docs: add supported ecosystems reference to CLAUDE.md Adds a one-liner listing all supported ecosystems (ruff, pytest, pip, golangci-lint, etc.) 
in the Architecture section to satisfy the pre-push documentation validation script. Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: Florian BRUNIAUX --- CLAUDE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index 0dddf14e5..e6183a514 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -75,6 +75,8 @@ For the full architecture, component details, and module development patterns, s Module responsibilities are documented in each folder's `README.md` and each file's `//!` doc header. Browse `src/cmds/*/` to discover available filters. +Supported ecosystems: git/gh/gt, cargo, go/golangci-lint, npm/pnpm/npx, ruff/pytest/pip/mypy, rspec/rubocop/rake, dotnet, playwright/vitest/jest, docker/kubectl/aws. + ### Proxy Mode **Purpose**: Execute commands without filtering but track usage for metrics. From 77a09514337ca89afdc502c76d39e86e156d9fee Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 28 Mar 2026 18:23:54 +0100 Subject: [PATCH 006/204] fix(cmds): shared cmd execution flow + clean & update docs shared cmd exec flow and also removed some notes in technical docs (does not belong here) --- src/analytics/README.md | 4 +- src/analytics/cc_economics.rs | 3 - src/cmds/README.md | 143 ++++++++++++++++++---------------- src/cmds/go/go_cmd.rs | 48 ++---------- src/cmds/js/next_cmd.rs | 32 +++----- src/cmds/js/npm_cmd.rs | 33 +++----- src/cmds/js/tsc_cmd.rs | 39 +++------- src/cmds/js/vitest_cmd.rs | 6 +- src/cmds/python/mypy_cmd.rs | 37 +++------ src/cmds/python/pytest_cmd.rs | 54 +++---------- src/cmds/python/ruff_cmd.rs | 58 ++++---------- src/cmds/ruby/rake_cmd.rs | 48 +++--------- src/cmds/ruby/rspec_cmd.rs | 66 ++++------------ src/cmds/ruby/rubocop_cmd.rs | 63 ++++----------- src/cmds/rust/cargo_cmd.rs | 46 +++-------- src/core/README.md | 8 +- src/core/mod.rs | 1 + src/core/runner.rs | 100 ++++++++++++++++++++++++ src/core/tracking.rs | 36 --------- src/hooks/README.md | 4 - src/parser/README.md | 2 - 21 
files changed, 306 insertions(+), 525 deletions(-) create mode 100644 src/core/runner.rs diff --git a/src/analytics/README.md b/src/analytics/README.md index 584b52d40..b6c651705 100644 --- a/src/analytics/README.md +++ b/src/analytics/README.md @@ -4,7 +4,7 @@ ## Scope -**Read-only dashboards** over the tracking database. Analytics presents the value that `cmds/` creates — it queries token savings, correlates with external spending data, and surfaces adoption opportunities. It never modifies the tracking DB. +**Read-only dashboards** over the tracking database. Queries token savings, correlates with external spending data, and surfaces adoption metrics. Never modifies the tracking DB. Owns: `rtk gain` (savings dashboard), `rtk cc-economics` (cost reduction), `rtk session` (adoption analysis), and Claude Code usage data parsing. @@ -15,7 +15,7 @@ Boundary rule: if a new module writes to the DB, it belongs in `core/` or `cmds/ ## Purpose Token savings analytics, economic modeling, and adoption metrics. -These modules read from the SQLite tracking database to produce dashboards, spending estimates, and session-level adoption reports that help users understand the value RTK provides. +These modules read from the SQLite tracking database to produce dashboards, spending estimates, and session-level adoption reports. ## Adding New Functionality To add a new analytics view: (1) create a new `*_cmd.rs` file in this directory, (2) query `core/tracking` for the metrics you need using the existing `TrackingDb` API, (3) register the command in `main.rs` under the `Commands` enum, and (4) add `#[cfg(test)]` unit tests with sample tracking data. Analytics modules should be read-only against the tracking database and never modify it. 
diff --git a/src/analytics/cc_economics.rs b/src/analytics/cc_economics.rs index 693dc61e2..037593102 100644 --- a/src/analytics/cc_economics.rs +++ b/src/analytics/cc_economics.rs @@ -14,9 +14,6 @@ use crate::core::utils::{format_cpt, format_tokens, format_usd}; // ── Constants ── -#[allow(dead_code)] -const BILLION: f64 = 1e9; - // API pricing ratios (verified Feb 2026, consistent across Claude models <=200K context) // Source: https://docs.anthropic.com/en/docs/about-claude/models const WEIGHT_OUTPUT: f64 = 5.0; // Output = 5x input diff --git a/src/cmds/README.md b/src/cmds/README.md index a84e8e744..c7bc52087 100644 --- a/src/cmds/README.md +++ b/src/cmds/README.md @@ -2,7 +2,7 @@ ## Scope -**Command execution and output filtering** — this is the core value RTK delivers. Every module here calls an external CLI tool (`Command::new("some_tool")`), transforms its stdout/stderr to reduce token consumption, and records savings via `core/tracking`. +**Command execution and output filtering.** Every module here calls an external CLI tool (`Command::new("some_tool")`), transforms its stdout/stderr to reduce token consumption, and records savings via `core/tracking`. Owns: all command-specific filter logic, organized by ecosystem (git, rust, js, python, go, dotnet, cloud, system). Cross-ecosystem routing (e.g., `lint_cmd` detecting Python and delegating to `ruff_cmd`) is an intra-component concern. 
@@ -35,47 +35,67 @@ Each subdirectory has its own README with file descriptions, parsing strategies, - **[`system/`](system/README.md)** — ls, tree, read, grep, find, wc, env, json, log, deps, summary, format, smart — format_cmd routing, filter levels, language detection - **[`ruby/`](ruby/README.md)** — rake/rails test, rspec, rubocop — JSON injection pattern, `ruby_exec()` bundle exec auto-detection -## Common Pattern +## Execution Flow: `runner::run_filtered()` -Every command module follows this structure: +The shared wrapper in [`core/runner.rs`](../core/runner.rs) encapsulates the six-phase execution skeleton. Modules build the `Command` (custom arg logic), then delegate to `run_filtered()` for everything else. + +``` + cmd.output() Filter applied to tee_and_hint() + | stdout or combined | + v | v + +---------+ stdout +-------+-------+ filtered +-------+ + | Execute |--------->| filter_fn() |----------->| Print | + +---------+ stderr +---------------+ +-------+ + | | + v v + +----------+ +---------+ + | raw = | | Track | + | stdout + | | savings | + | stderr | +---------+ + +----------+ | + v + +-----------+ + | Exit code | + | (on fail) | + +-----------+ +``` + +**Six phases in order:** + +1. **Execute** — `cmd.output()` captures stdout + stderr +2. **Filter** — `filter_fn` receives stdout-only or combined, returns compressed string +3. **Print** — filtered output printed; if tee enabled, appends recovery hint on failure +4. **Stderr passthrough** — when `filter_stdout_only`: stderr printed via `eprintln!()` unconditionally +5. **Track** — `timer.track()` records raw vs filtered for token savings +6. 
**Exit code** — `std::process::exit(code)` on failure, `Ok(())` on success + +**`RunOptions` builder:** + +| Constructor | Behavior | +|-------------|----------| +| `RunOptions::default()` | Combined stdout+stderr to filter, no tee | +| `RunOptions::with_tee("label")` | Combined filtering + tee recovery | +| `RunOptions::stdout_only()` | Stdout-only to filter, stderr passthrough, no tee | +| `RunOptions::stdout_only().tee("label")` | Stdout-only + tee recovery | + +**Example:** ```rust -pub fn run(args: MyArgs, verbose: u8) -> Result<()> { - let timer = tracking::TimedExecution::start(); - let output = resolved_command("mycmd").args(&args).output().context("Failed to execute mycmd")?; - let raw = format!("{}\n{}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); - - let filtered = filter_output(&raw).unwrap_or_else(|e| { - eprintln!("rtk: filter warning: {}", e); - raw.clone() // Fallback to raw on filter failure - }); - - let exit_code = output.status.code().unwrap_or(1); - if let Some(hint) = tee::tee_and_hint(&raw, "mycmd", exit_code) { - println!("{}\n{}", filtered, hint); - } else { - println!("{}", filtered); - } - - timer.track("mycmd args", "rtk mycmd args", &raw, &filtered); - if !output.status.success() { std::process::exit(exit_code); } - Ok(()) +pub fn run(args: &[String], verbose: u8) -> Result<()> { + let mut cmd = resolved_command("mycmd"); + for arg in args { cmd.arg(arg); } + if verbose > 0 { eprintln!("Running: mycmd {}", args.join(" ")); } + + runner::run_filtered( + cmd, "mycmd", &args.join(" "), + filter_mycmd_output, + runner::RunOptions::stdout_only().tee("mycmd"), + ) } ``` -Six phases: **timer** → **execute** → **filter (with fallback)** → **tee on failure** → **track** → **exit code**. See [core/README.md](../core/README.md#consumer-contracts) for the contracts each phase must honor. 
- -## Token Savings by Category +Modules with deviations (subcommand dispatch, parser trait systems, two-command fallback, synthetic output). -| Category | Commands | Typical Savings | Strategy | -|----------|----------|----------------|----------| -| Test Runners | vitest, pytest, cargo test, go test, playwright | 90-99% | Show failures only, aggregate passes | -| Build Tools | cargo build, npm, pnpm, dotnet | 70-90% | Strip progress bars, summarize errors | -| VCS | git status/log/diff/show | 70-80% | Compact commit hashes, stat summaries | -| Linters | eslint/biome, ruff, tsc, mypy, golangci-lint | 80-85% | Group by file/rule, strip context | -| Package Managers | pip, cargo install, pnpm list | 75-80% | Remove decorative output, compact trees | -| File Operations | ls, find, grep, cat/head/tail | 60-75% | Tree format, grouped results, truncation | -| Infrastructure | docker, kubectl, aws, terraform | 75-85% | Essential info only | ## Cross-Command Dependencies @@ -111,44 +131,29 @@ All modules must call `timer.track()` on every path — success, failure, and fa All modules accept `verbose: u8`. Use it to print debug info (command being run, savings %, filter tier). Do not accept and ignore it. 
-### Gaps (to be fixed) - -**Exit code** — 5 different patterns coexist, should be reviewed for uniform behavior: -- `vitest_cmd.rs`, `tsc_cmd.rs`, `psql_cmd.rs` — exit unconditionally, even on success -- `lint_cmd.rs` — swallows signal kills silently -- `golangci_cmd.rs` — maps signal kill to exit 130 (correct but unique) - -**Filter passthrough** — silent passthrough, no warning: -- `gh_cmd.rs`, `pip_cmd.rs`, `container.rs`, `dotnet_cmd.rs` — `run_passthrough()` skips filtering without warning -- `pnpm_cmd.rs` — 3-tier degradation but no tee recovery on final tier - -**Tee recovery** — missing from some high-risk modules: -- `pnpm_cmd.rs` — 3-tier parser, no tee -- `gh_cmd.rs` — aggressive markdown filtering, no tee -- `ruff_cmd.rs`, `golangci_cmd.rs` — JSON parsers, no tee -- `psql_cmd.rs` — has tee but exits before calling it on error path - -**Stderr handling** — 3 patterns coexist. Some modules combine stderr into raw (correct), others print via `eprintln!()` and exclude from tracking (inflates savings %). See `docs/ISO_ANALYZE.md` section 4. - -**Tracking** — exit before track on error path: -- `ls.rs`, `tree.rs` — lost metrics on failure -- `container.rs` — inconsistent across subcommands - -**Verbose** — accept parameter but ignore it: -- `container.rs` — all internal functions prefix `_verbose` -- `diff_cmd.rs` — `_verbose` unused ## Adding a New Command Filter -Adding a new filter or command requires changes in multiple places: +Adding a new filter or command requires changes in multiple places. For TOML-vs-Rust decision criteria, see [CONTRIBUTING.md](../../CONTRIBUTING.md#toml-vs-rust-which-one). -1. **Create the filter** — TOML file in [`src/filters/`](../filters/README.md) or Rust module in `src/cmds//` -2. **Add rewrite pattern** — Entry in `src/discover/rules.rs` (PATTERNS + RULES arrays at matching index) so hooks auto-rewrite the command -3. 
**Register in main.rs** — (Rust modules only) Three changes: - - Add `pub mod mymod;` to the ecosystem's `mod.rs` (e.g., `src/cmds/system/mod.rs`) +### Rust module (structured output, flag injection, state machines) + +1. **Create module** in `src/cmds//mycmd_cmd.rs`: + - Write the `filter_mycmd()` function (pure: `&str -> String`, no side effects) + - Write `run()` using `runner::run_filtered()` — build the `Command`, choose `RunOptions`, delegate + - Use `RunOptions::stdout_only()` when the filter parses structured stdout (JSON, NDJSON) — stderr would corrupt parsing + - Use `RunOptions::default()` when filtering combined text output + - Add `.tee("label")` when the filter parses structured output (enables raw output recovery on failure) +2. **Register module**: + - Add `pub mod mycmd_cmd;` to the ecosystem's `mod.rs` - Add variant to `Commands` enum in `main.rs` with `#[arg(trailing_var_arg = true, allow_hyphen_values = true)]` - - Add routing match arm in `main.rs` to call `mymod::run()` + - Add routing match arm in `main.rs` to call `mycmd_cmd::run()` +3. **Add rewrite pattern** — Entry in `src/discover/rules.rs` (PATTERNS + RULES arrays at matching index) so hooks auto-rewrite the command 4. **Write tests** — Real fixture, snapshot test, token savings >= 60% (see [testing rules](../../.claude/rules/cli-testing.md)) -5. **Update docs** — README.md command list, CHANGELOG.md +5. **Update docs** — Ecosystem README, CHANGELOG.md + +### TOML filter (simple line-based filtering) -Follow the [Common Pattern](#common-pattern) above for the module template (timer, fallback, tee, tracking, exit code). For TOML-vs-Rust decision criteria, see [CONTRIBUTING.md](../../CONTRIBUTING.md#toml-vs-rust-which-one). +1. **Create filter** in [`src/filters/`](../filters/README.md) +2. **Add rewrite pattern** in `src/discover/rules.rs` +3. 
**Write tests** and **update docs** diff --git a/src/cmds/go/go_cmd.rs b/src/cmds/go/go_cmd.rs index 47771e7ae..d146319e8 100644 --- a/src/cmds/go/go_cmd.rs +++ b/src/cmds/go/go_cmd.rs @@ -40,12 +40,9 @@ struct PackageResult { } pub fn run_test(args: &[String], verbose: u8) -> Result<()> { - let timer = tracking::TimedExecution::start(); - let mut cmd = resolved_command("go"); cmd.arg("test"); - // Force JSON output if not already specified if !args.iter().any(|a| a == "-json") { cmd.arg("-json"); } @@ -58,44 +55,13 @@ pub fn run_test(args: &[String], verbose: u8) -> Result<()> { eprintln!("Running: go test -json {}", args.join(" ")); } - let output = cmd - .output() - .context("Failed to run go test. Is Go installed?")?; - - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); - - let exit_code = output - .status - .code() - .unwrap_or(if output.status.success() { 0 } else { 1 }); - let filtered = filter_go_test_json(&stdout); - - if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "go_test", exit_code) { - println!("{}\n{}", filtered, hint); - } else { - println!("{}", filtered); - } - - // Include stderr if present (build errors, etc.) - if !stderr.trim().is_empty() { - eprintln!("{}", stderr.trim()); - } - - timer.track( - &format!("go test {}", args.join(" ")), - &format!("rtk go test {}", args.join(" ")), - &raw, - &filtered, - ); - - // Preserve exit code for CI/CD - if !output.status.success() { - std::process::exit(exit_code); - } - - Ok(()) + crate::core::runner::run_filtered( + cmd, + "go test", + &args.join(" "), + filter_go_test_json, + crate::core::runner::RunOptions::stdout_only().tee("go_test"), + ) } pub fn run_build(args: &[String], verbose: u8) -> Result<()> { diff --git a/src/cmds/js/next_cmd.rs b/src/cmds/js/next_cmd.rs index 5a7ad353d..8cc78a478 100644 --- a/src/cmds/js/next_cmd.rs +++ b/src/cmds/js/next_cmd.rs @@ -1,13 +1,11 @@ //! 
Filters Next.js build output down to route metrics and bundle sizes. -use crate::core::tracking; +use crate::core::runner; use crate::core::utils::{resolved_command, strip_ansi, tool_exists, truncate}; -use anyhow::{Context, Result}; +use anyhow::Result; use regex::Regex; pub fn run(args: &[String], verbose: u8) -> Result<()> { - let timer = tracking::TimedExecution::start(); - // Try next directly first, fallback to npx if not found let next_exists = tool_exists("next"); @@ -30,25 +28,13 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { eprintln!("Running: {} build", tool); } - let output = cmd - .output() - .context("Failed to run next build (try: npm install -g next)")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); - - let filtered = filter_next_build(&raw); - - println!("{}", filtered); - - timer.track("next build", "rtk next build", &raw, &filtered); - - // Preserve exit code for CI/CD - if !output.status.success() { - std::process::exit(output.status.code().unwrap_or(1)); - } - - Ok(()) + runner::run_filtered( + cmd, + "next build", + &args.join(" "), + |raw| filter_next_build(raw), + runner::RunOptions::default(), + ) } /// Filter Next.js build output - extract routes, bundles, warnings diff --git a/src/cmds/js/npm_cmd.rs b/src/cmds/js/npm_cmd.rs index 7c86fe776..38f50725c 100644 --- a/src/cmds/js/npm_cmd.rs +++ b/src/cmds/js/npm_cmd.rs @@ -1,8 +1,8 @@ //! Filters npm output and auto-injects the "run" subcommand when appropriate. -use crate::core::tracking; +use crate::core::runner; use crate::core::utils::resolved_command; -use anyhow::{Context, Result}; +use anyhow::Result; /// Known npm subcommands that should NOT get "run" injected. /// Shared between production code and tests to avoid drift. 
@@ -74,8 +74,6 @@ const NPM_SUBCOMMANDS: &[&str] = &[ ]; pub fn run(args: &[String], verbose: u8, skip_env: bool) -> Result<()> { - let timer = tracking::TimedExecution::start(); - let mut cmd = resolved_command("npm"); // Determine if this is "npm run - - -EOF -``` - -## Understanding Token Savings - -### Token Estimation - -rtk estimates tokens using `text.len() / 4` (4 characters per token average). - -**Accuracy**: ±10% compared to actual LLM tokenization (sufficient for trends). - -### Savings Calculation - -``` -Input Tokens = estimate_tokens(raw_command_output) -Output Tokens = estimate_tokens(rtk_filtered_output) -Saved Tokens = Input - Output -Savings % = (Saved / Input) × 100 -``` - -### Typical Savings by Command - -| Command | Typical Savings | Mechanism | -|---------|----------------|-----------| -| `rtk git status` | 77-93% | Compact stat format | -| `rtk eslint` | 84% | Group by rule | -| `rtk vitest run` | 94-99% | Show failures only | -| `rtk find` | 75% | Tree format | -| `rtk pnpm list` | 70-90% | Compact dependencies | -| `rtk grep` | 70% | Truncate + group | - -## Database Management - -### Inspect Raw Data - -```bash -# Location -ls -lh ~/.local/share/rtk/history.db - -# Schema -sqlite3 ~/.local/share/rtk/history.db ".schema" - -# Recent records -sqlite3 ~/.local/share/rtk/history.db \ - "SELECT timestamp, rtk_cmd, saved_tokens FROM commands - ORDER BY timestamp DESC LIMIT 10" - -# Total database size -sqlite3 ~/.local/share/rtk/history.db \ - "SELECT COUNT(*), - SUM(saved_tokens) as total_saved, - MIN(DATE(timestamp)) as first_record, - MAX(DATE(timestamp)) as last_record - FROM commands" -``` - -### Backup & Restore - -```bash -# Backup -cp ~/.local/share/rtk/history.db ~/backups/rtk-history-$(date +%Y%m%d).db - -# Restore -cp ~/backups/rtk-history-20260128.db ~/.local/share/rtk/history.db - -# Export for migration -sqlite3 ~/.local/share/rtk/history.db .dump > rtk-backup.sql -``` - -### Cleanup - -```bash -# Manual cleanup (older than 90 
days) -sqlite3 ~/.local/share/rtk/history.db \ - "DELETE FROM commands WHERE timestamp < datetime('now', '-90 days')" - -# Reset all data -rm ~/.local/share/rtk/history.db -# Next rtk command will recreate database -``` - -## Integration Examples - -### GitHub Actions CI/CD - -```yaml -# .github/workflows/rtk-stats.yml -name: RTK Stats Report -on: - schedule: - - cron: '0 0 * * 1' # Weekly on Monday -jobs: - stats: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Install rtk - run: cargo install --path . - - name: Generate report - run: | - rtk gain --weekly --format json > stats/week-$(date +%Y-%W).json - - name: Commit stats - run: | - git add stats/ - git commit -m "Weekly rtk stats" - git push -``` - -### Slack Bot - -```python -import subprocess -import json -import requests - -def send_rtk_stats(): - result = subprocess.run(['rtk', 'gain', '--format', 'json'], - capture_output=True, text=True) - data = json.loads(result.stdout) - - message = f""" - 📊 *RTK Token Savings Report* - - Total Saved: {data['summary']['total_saved']:,} tokens - Savings Rate: {data['summary']['avg_savings_pct']:.1f}% - Commands: {data['summary']['total_commands']} - """ - - requests.post(SLACK_WEBHOOK_URL, json={'text': message}) -``` - -## Troubleshooting - -### No data showing - -```bash -# Check if database exists -ls -lh ~/.local/share/rtk/history.db - -# Check record count -sqlite3 ~/.local/share/rtk/history.db "SELECT COUNT(*) FROM commands" - -# Run a tracked command to generate data -rtk git status -``` - -### Export fails - -```bash -# Check for pipe errors -rtk gain --format json 2>&1 | tee /tmp/rtk-debug.log | jq . - -# Use release build to avoid warnings -cargo build --release -./target/release/rtk gain --format json -``` - -### Incorrect statistics - -Token estimation is a heuristic. 
For precise measurements: - -```bash -# Install tiktoken -pip install tiktoken - -# Validate estimation -rtk git status > output.txt -python -c " -import tiktoken -enc = tiktoken.get_encoding('cl100k_base') -text = open('output.txt').read() -print(f'Actual tokens: {len(enc.encode(text))}') -print(f'rtk estimate: {len(text) // 4}') -" -``` - -## Best Practices - -1. **Regular Exports**: `rtk gain --all --format json > monthly-$(date +%Y%m).json` -2. **Trend Analysis**: Compare week-over-week savings to identify optimization opportunities -3. **Command Profiling**: Use `--history` to see which commands save the most -4. **Backup Before Cleanup**: Always backup before manual database operations -5. **CI Integration**: Track savings across team in shared dashboards - -## See Also - -- [README.md](../README.md) - Full rtk documentation -- [CLAUDE.md](../CLAUDE.md) - Claude Code integration guide -- [ARCHITECTURE.md](../ARCHITECTURE.md) - Technical architecture diff --git a/docs/FEATURES.md b/docs/FEATURES.md deleted file mode 100644 index 061a604a9..000000000 --- a/docs/FEATURES.md +++ /dev/null @@ -1,1410 +0,0 @@ -# RTK - Documentation fonctionnelle complete - -> **rtk (Rust Token Killer)** -- Proxy CLI haute performance qui reduit la consommation de tokens LLM de 60 a 90%. - -Binaire Rust unique, zero dependances externes, overhead < 10ms par commande. - ---- - -## Table des matieres - -1. [Vue d'ensemble](#vue-densemble) -2. [Drapeaux globaux](#drapeaux-globaux) -3. [Commandes Fichiers](#commandes-fichiers) -4. [Commandes Git](#commandes-git) -5. [Commandes GitHub CLI](#commandes-github-cli) -6. [Commandes Test](#commandes-test) -7. [Commandes Build et Lint](#commandes-build-et-lint) -8. [Commandes Formatage](#commandes-formatage) -9. [Gestionnaires de paquets](#gestionnaires-de-paquets) -10. [Conteneurs et orchestration](#conteneurs-et-orchestration) -11. [Donnees et reseau](#donnees-et-reseau) -12. [Cloud et bases de donnees](#cloud-et-bases-de-donnees) -13. 
[Stacked PRs (Graphite)](#stacked-prs-graphite) -14. [Analytique et suivi](#analytique-et-suivi) -15. [Systeme de hooks](#systeme-de-hooks) -16. [Configuration](#configuration) -17. [Systeme Tee (recuperation de sortie)](#systeme-tee) -18. [Telemetrie](#telemetrie) - ---- - -## Vue d'ensemble - -rtk agit comme un proxy entre un LLM (Claude Code, Gemini CLI, etc.) et les commandes systeme. Quatre strategies de filtrage sont appliquees selon le type de commande : - -| Strategie | Description | Exemple | -|-----------|-------------|---------| -| **Filtrage intelligent** | Supprime le bruit (commentaires, espaces, boilerplate) | `ls -la` -> arbre compact | -| **Regroupement** | Agregation par repertoire, par type d'erreur, par regle | Tests groupes par fichier | -| **Troncature** | Conserve le contexte pertinent, supprime la redondance | Diff condense | -| **Deduplication** | Fusionne les lignes de log repetees avec compteurs | `error x42` | - -### Mecanisme de fallback - -Si rtk ne reconnait pas une sous-commande, il execute la commande brute (passthrough) et enregistre l'evenement dans la base de suivi. Cela garantit que rtk est **toujours sur** a utiliser -- aucune commande ne sera bloquee. - ---- - -## Drapeaux globaux - -Ces drapeaux s'appliquent a **toutes** les sous-commandes : - -| Drapeau | Court | Description | -|---------|-------|-------------| -| `--verbose` | `-v` | Augmenter la verbosite (-v, -vv, -vvv). Montre les details de filtrage. | -| `--ultra-compact` | `-u` | Mode ultra-compact : icones ASCII, format inline. Economies supplementaires. | -| `--skip-env` | -- | Definit `SKIP_ENV_VALIDATION=1` pour les processus enfants (Next.js, tsc, lint, prisma). 
| - -**Exemples :** - -```bash -rtk -v git status # Status compact + details de filtrage sur stderr -rtk -vvv cargo test # Verbosite maximale (debug) -rtk -u git log # Log ultra-compact, icones ASCII -rtk --skip-env next build # Desactive la validation d'env de Next.js -``` - ---- - -## Commandes Fichiers - -### `rtk ls` -- Listage de repertoire - -**Objectif :** Remplace `ls` et `tree` avec une sortie optimisee en tokens. - -**Syntaxe :** -```bash -rtk ls [args...] -``` - -Tous les drapeaux natifs de `ls` sont supportes (`-l`, `-a`, `-h`, `-R`, etc.). - -**Economies :** ~80% de reduction de tokens - -**Avant / Apres :** -``` -# ls -la (45 lignes, ~800 tokens) # rtk ls (12 lignes, ~150 tokens) -drwxr-xr-x 15 user staff 480 ... my-project/ --rw-r--r-- 1 user staff 1234 ... +-- src/ (8 files) --rw-r--r-- 1 user staff 567 ... | +-- main.rs -...40 lignes de plus... +-- Cargo.toml - +-- README.md -``` - ---- - -### `rtk tree` -- Arbre de repertoire - -**Objectif :** Proxy vers `tree` natif avec sortie filtree. - -**Syntaxe :** -```bash -rtk tree [args...] -``` - -Supporte tous les drapeaux natifs de `tree` (`-L`, `-d`, `-a`, etc.). - -**Economies :** ~80% - ---- - -### `rtk read` -- Lecture de fichier - -**Objectif :** Remplace `cat`, `head`, `tail` avec un filtrage intelligent du contenu. 
- -**Syntaxe :** -```bash -rtk read [options] -rtk read - [options] # Lecture depuis stdin -``` - -**Options :** - -| Option | Court | Defaut | Description | -|--------|-------|--------|-------------| -| `--level` | `-l` | `minimal` | Niveau de filtrage : `none`, `minimal`, `aggressive` | -| `--max-lines` | `-m` | illimite | Nombre maximum de lignes | -| `--line-numbers` | `-n` | non | Afficher les numeros de ligne | - -**Niveaux de filtrage :** - -| Niveau | Description | Economies | -|--------|-------------|-----------| -| `none` | Aucun filtrage, sortie brute | 0% | -| `minimal` | Supprime commentaires et lignes vides excessives | ~30% | -| `aggressive` | Signatures uniquement (supprime les corps de fonctions) | ~74% | - -**Avant / Apres (mode aggressive) :** -``` -# cat main.rs (~200 lignes) # rtk read main.rs -l aggressive (~50 lignes) -fn main() -> Result<()> { fn main() -> Result<()> { ... } - let config = Config::load()?; fn process_data(input: &str) -> Vec { ... } - let data = process_data(&input); struct Config { ... } - for item in data { impl Config { fn load() -> Result { ... } } - println!("{}", item); - } - Ok(()) -} -... -``` - -**Langages supportes pour le filtrage :** Rust, Python, JavaScript, TypeScript, Go, C, C++, Java, Ruby, Shell. - ---- - -### `rtk smart` -- Resume heuristique - -**Objectif :** Genere un resume technique de 2 lignes pour un fichier source. - -**Syntaxe :** -```bash -rtk smart [--model heuristic] [--force-download] -``` - -**Economies :** ~95% - -**Exemple :** -``` -$ rtk smart src/tracking.rs -SQLite-based token tracking system for command executions. -Records input/output tokens, savings %, execution times with 90-day retention. -``` - ---- - -### `rtk find` -- Recherche de fichiers - -**Objectif :** Remplace `find` et `fd` avec une sortie compacte groupee par repertoire. - -**Syntaxe :** -```bash -rtk find [args...] -``` - -Supporte a la fois la syntaxe RTK et la syntaxe native `find` (`-name`, `-type`, etc.). 
- -**Economies :** ~80% - -**Avant / Apres :** -``` -# find . -name "*.rs" (30 lignes) # rtk find "*.rs" . (8 lignes) -./src/main.rs src/ (12 .rs) -./src/git.rs main.rs, git.rs, config.rs -./src/config.rs tracking.rs, filter.rs, utils.rs -./src/tracking.rs ...6 more -./src/filter.rs tests/ (3 .rs) -./src/utils.rs test_git.rs, test_ls.rs, test_filter.rs -...24 lignes de plus... -``` - ---- - -### `rtk grep` -- Recherche dans le contenu - -**Objectif :** Remplace `grep` et `rg` avec une sortie groupee par fichier, tronquee. - -**Syntaxe :** -```bash -rtk grep [chemin] [options] -``` - -**Options :** - -| Option | Court | Defaut | Description | -|--------|-------|--------|-------------| -| `--max-len` | `-l` | 80 | Longueur maximale de ligne | -| `--max` | `-m` | 50 | Nombre maximum de resultats | -| `--context-only` | `-c` | non | Afficher uniquement le contexte du match | -| `--file-type` | `-t` | tous | Filtrer par type (ts, py, rust, etc.) | -| `--line-numbers` | `-n` | oui | Numeros de ligne (toujours actif) | - -Les arguments supplementaires sont transmis a `rg` (ripgrep). - -**Economies :** ~80% - -**Avant / Apres :** -``` -# rg "fn run" (20 lignes) # rtk grep "fn run" (10 lignes) -src/git.rs:45:pub fn run(...) src/git.rs -src/git.rs:120:fn run_status(...) 45: pub fn run(...) -src/ls.rs:12:pub fn run(...) 120: fn run_status(...) -src/ls.rs:25:fn run_tree(...) src/ls.rs -... 12: pub fn run(...) - 25: fn run_tree(...) -``` - ---- - -### `rtk diff` -- Diff condense - -**Objectif :** Diff ultra-condense entre deux fichiers (uniquement les lignes modifiees). - -**Syntaxe :** -```bash -rtk diff -rtk diff # Stdin comme second fichier -``` - -**Economies :** ~60% - ---- - -### `rtk wc` -- Comptage compact - -**Objectif :** Remplace `wc` avec une sortie compacte (supprime les chemins et le padding). - -**Syntaxe :** -```bash -rtk wc [args...] -``` - -Supporte tous les drapeaux natifs de `wc` (`-l`, `-w`, `-c`, etc.). 
- ---- - -## Commandes Git - -### Vue d'ensemble - -Toutes les sous-commandes git sont supportees. Les commandes non reconnues sont transmises directement a git (passthrough). - -**Options globales git :** - -| Option | Description | -|--------|-------------| -| `-C ` | Changer de repertoire avant execution | -| `-c ` | Surcharger une config git | -| `--git-dir ` | Chemin vers le repertoire .git | -| `--work-tree ` | Chemin vers le working tree | -| `--no-pager` | Desactiver le pager | -| `--no-optional-locks` | Ignorer les locks optionnels | -| `--bare` | Traiter comme repo bare | -| `--literal-pathspecs` | Pathspecs literals | - ---- - -### `rtk git status` -- Status compact - -**Economies :** ~80% - -```bash -rtk git status [args...] # Supporte tous les drapeaux git status -``` - -**Avant / Apres :** -``` -# git status (~20 lignes, ~400 tokens) # rtk git status (~5 lignes, ~80 tokens) -On branch main main | 3M 1? 1A -Your branch is up to date with M src/main.rs - 'origin/main'. M src/git.rs - M tests/test_git.rs -Changes not staged for commit: ? new_file.txt - (use "git add ..." to update) A staged_file.rs - modified: src/main.rs - modified: src/git.rs - ... -``` - ---- - -### `rtk git log` -- Historique compact - -**Economies :** ~80% - -```bash -rtk git log [args...] # Supporte --oneline, --graph, --all, -n, etc. -``` - -**Avant / Apres :** -``` -# git log (50+ lignes) # rtk git log -n 5 (5 lignes) -commit abc123def... (HEAD -> main) abc123 Fix token counting bug -Author: User def456 Add vitest support -Date: Mon Jan 15 10:30:00 2024 789abc Refactor filter engine - 012def Update README - Fix token counting bug 345ghi Initial commit -... -``` - ---- - -### `rtk git diff` -- Diff compact - -**Economies :** ~75% - -```bash -rtk git diff [args...] # Supporte --stat, --cached, --staged, etc. 
-``` - -**Avant / Apres :** -``` -# git diff (~100 lignes) # rtk git diff (~25 lignes) -diff --git a/src/main.rs b/src/main.rs src/main.rs (+5/-2) -index abc123..def456 100644 + let config = Config::load()?; ---- a/src/main.rs + config.validate()?; -+++ b/src/main.rs - // old code -@@ -10,6 +10,8 @@ - let x = 42; - fn main() { src/git.rs (+1/-1) -+ let config = Config::load()?; ~ format!("ok {}", branch) -...30 lignes de headers et contexte... -``` - ---- - -### `rtk git show` -- Show compact - -**Economies :** ~80% - -```bash -rtk git show [args...] -``` - -Affiche le resume du commit + stat + diff compact. - ---- - -### `rtk git add` -- Add ultra-compact - -**Economies :** ~92% - -```bash -rtk git add [args...] # Supporte -A, -p, --all, etc. -``` - -**Sortie :** `ok` (un seul mot) - ---- - -### `rtk git commit` -- Commit ultra-compact - -**Economies :** ~92% - -```bash -rtk git commit -m "message" [args...] # Supporte -a, --amend, --allow-empty, etc. -``` - -**Sortie :** `ok abc1234` (confirmation + hash court) - ---- - -### `rtk git push` -- Push ultra-compact - -**Economies :** ~92% - -```bash -rtk git push [args...] # Supporte -u, remote, branch, etc. -``` - -**Avant / Apres :** -``` -# git push (15 lignes, ~200 tokens) # rtk git push (1 ligne, ~10 tokens) -Enumerating objects: 5, done. ok main -Counting objects: 100% (5/5), done. -Delta compression using up to 8 threads -... -``` - ---- - -### `rtk git pull` -- Pull ultra-compact - -**Economies :** ~92% - -```bash -rtk git pull [args...] -``` - -**Sortie :** `ok 3 files +10 -2` - ---- - -### `rtk git branch` -- Branches compact - -```bash -rtk git branch [args...] # Supporte -d, -D, -m, etc. -``` - -Affiche branche courante, branches locales, branches distantes de facon compacte. - ---- - -### `rtk git fetch` -- Fetch compact - -```bash -rtk git fetch [args...] 
-``` - -**Sortie :** `ok fetched (N new refs)` - ---- - -### `rtk git stash` -- Stash compact - -```bash -rtk git stash [list|show|pop|apply|drop|push] [args...] -``` - ---- - -### `rtk git worktree` -- Worktree compact - -```bash -rtk git worktree [add|remove|prune|list] [args...] -``` - ---- - -### Passthrough git - -Toute sous-commande git non listee ci-dessus est executee directement : - -```bash -rtk git rebase main # Execute git rebase main -rtk git cherry-pick abc # Execute git cherry-pick abc -rtk git tag v1.0.0 # Execute git tag v1.0.0 -``` - ---- - -## Commandes GitHub CLI - -### `rtk gh` -- GitHub CLI compact - -**Objectif :** Remplace `gh` avec une sortie optimisee. - -**Syntaxe :** -```bash -rtk gh [args...] -``` - -**Sous-commandes supportees :** - -| Commande | Description | Economies | -|----------|-------------|-----------| -| `rtk gh pr list` | Liste des PRs compacte | ~80% | -| `rtk gh pr view ` | Details d'une PR + checks | ~87% | -| `rtk gh pr checks` | Status des checks CI | ~79% | -| `rtk gh issue list` | Liste des issues compacte | ~80% | -| `rtk gh run list` | Status des workflow runs | ~82% | -| `rtk gh api ` | Reponse API compacte | ~26% | - -**Avant / Apres :** -``` -# gh pr list (~30 lignes) # rtk gh pr list (~10 lignes) -Showing 10 of 15 pull requests in org/repo #42 feat: add vitest (open, 2d) - #41 fix: git diff crash (open, 3d) -#42 feat: add vitest support #40 chore: update deps (merged, 5d) - user opened about 2 days ago #39 docs: add guide (merged, 1w) - ... labels: enhancement -... -``` - ---- - -## Commandes Test - -### `rtk test` -- Wrapper de tests generique - -**Objectif :** Execute n'importe quelle commande de test et affiche uniquement les echecs. 
- -**Syntaxe :** -```bash -rtk test -``` - -**Economies :** ~90% - -**Exemple :** -```bash -rtk test cargo test -rtk test npm test -rtk test bun test -rtk test pytest -``` - -**Avant / Apres :** -``` -# cargo test (200+ lignes en cas d'echec) # rtk test cargo test (~20 lignes) -running 15 tests FAILED: 2/15 tests -test utils::test_parse ... ok test_edge_case: assertion failed -test utils::test_format ... ok test_overflow: panic at utils.rs:18 -test utils::test_edge_case ... FAILED -...150 lignes de backtrace... -``` - ---- - -### `rtk err` -- Erreurs/avertissements uniquement - -**Objectif :** Execute une commande et ne montre que les erreurs et avertissements. - -**Syntaxe :** -```bash -rtk err -``` - -**Economies :** ~80% - -**Exemple :** -```bash -rtk err npm run build -rtk err cargo build -``` - ---- - -### `rtk cargo test` -- Tests Rust - -**Economies :** ~90% - -```bash -rtk cargo test [args...] -``` - -N'affiche que les echecs. Supporte tous les arguments de `cargo test`. - ---- - -### `rtk cargo nextest` -- Tests Rust (nextest) - -```bash -rtk cargo nextest [run|list|--lib] [args...] -``` - -Filtre la sortie de `cargo nextest` pour n'afficher que les echecs. - ---- - -### `rtk vitest run` -- Tests Vitest - -**Economies :** ~99.5% - -```bash -rtk vitest run [args...] -``` - ---- - -### `rtk playwright test` -- Tests E2E Playwright - -**Economies :** ~94% - -```bash -rtk playwright [args...] -``` - ---- - -### `rtk pytest` -- Tests Python - -**Economies :** ~90% - -```bash -rtk pytest [args...] -``` - ---- - -### `rtk go test` -- Tests Go - -**Economies :** ~90% - -```bash -rtk go test [args...] -``` - -Utilise le streaming JSON NDJSON de Go pour un filtrage precis. - ---- - -## Commandes Build et Lint - -### `rtk cargo build` -- Build Rust - -**Economies :** ~80% - -```bash -rtk cargo build [args...] -``` - -Supprime les lignes "Compiling...", ne conserve que les erreurs et le resultat final. 
- ---- - -### `rtk cargo check` -- Check Rust - -**Economies :** ~80% - -```bash -rtk cargo check [args...] -``` - -Supprime les lignes "Checking...", ne conserve que les erreurs. - ---- - -### `rtk cargo clippy` -- Clippy Rust - -**Economies :** ~80% - -```bash -rtk cargo clippy [args...] -``` - -Regroupe les avertissements par regle de lint. - ---- - -### `rtk cargo install` -- Install Rust - -```bash -rtk cargo install [args...] -``` - -Supprime la compilation des dependances, ne conserve que le resultat d'installation et les erreurs. - ---- - -### `rtk tsc` -- TypeScript Compiler - -**Economies :** ~83% - -```bash -rtk tsc [args...] -``` - -Regroupe les erreurs TypeScript par fichier et par code d'erreur. - -**Avant / Apres :** -``` -# tsc --noEmit (50 lignes) # rtk tsc (15 lignes) -src/api.ts(12,5): error TS2345: ... src/api.ts (3 errors) -src/api.ts(15,10): error TS2345: ... TS2345: Argument type mismatch (x2) -src/api.ts(20,3): error TS7006: ... TS7006: Parameter implicitly has 'any' -src/utils.ts(5,1): error TS2304: ... src/utils.ts (1 error) -... TS2304: Cannot find name 'foo' -``` - ---- - -### `rtk lint` -- ESLint / Biome - -**Economies :** ~84% - -```bash -rtk lint [args...] -rtk lint biome [args...] -``` - -Regroupe les violations par regle et par fichier. Auto-detecte le linter. - ---- - -### `rtk prettier` -- Verification du formatage - -**Economies :** ~70% - -```bash -rtk prettier [args...] # ex: rtk prettier --check . -``` - -Affiche uniquement les fichiers necessitant un formatage. - ---- - -### `rtk format` -- Formateur universel - -```bash -rtk format [args...] -``` - -Auto-detecte le formateur du projet (prettier, black, ruff format) et applique un filtre compact. - ---- - -### `rtk next build` -- Build Next.js - -**Economies :** ~87% - -```bash -rtk next [args...] -``` - -Sortie compacte avec metriques de routes. - ---- - -### `rtk ruff` -- Linter/formateur Python - -**Economies :** ~80% - -```bash -rtk ruff check [args...] 
-rtk ruff format --check [args...] -``` - -Sortie JSON compressee. - ---- - -### `rtk mypy` -- Type checker Python - -```bash -rtk mypy [args...] -``` - -Regroupe les erreurs de type par fichier. - ---- - -### `rtk golangci-lint` -- Linter Go - -**Economies :** ~85% - -```bash -rtk golangci-lint run [args...] -``` - -Sortie JSON compressee. - ---- - -## Commandes Formatage - -### `rtk prettier` -- Prettier - -```bash -rtk prettier --check . -rtk prettier --write src/ -``` - ---- - -### `rtk format` -- Detecteur universel - -```bash -rtk format [args...] -``` - -Detecte automatiquement : prettier, black, ruff format, rustfmt. Applique un filtre compact unifie. - ---- - -## Gestionnaires de paquets - -### `rtk pnpm` -- pnpm - -| Commande | Description | Economies | -|----------|-------------|-----------| -| `rtk pnpm list [-d N]` | Arbre de dependances compact | ~70% | -| `rtk pnpm outdated` | Paquets obsoletes : `pkg: old -> new` | ~80% | -| `rtk pnpm install [pkgs...]` | Filtre les barres de progression | ~60% | -| `rtk pnpm build` | Delegue au filtre Next.js | ~87% | -| `rtk pnpm typecheck` | Delegue au filtre tsc | ~83% | - -Les sous-commandes non reconnues sont transmises directement a pnpm (passthrough). - ---- - -### `rtk npm` -- npm - -```bash -rtk npm [args...] # ex: rtk npm run build -``` - -Filtre le boilerplate npm (barres de progression, en-tetes, etc.). - ---- - -### `rtk npx` -- npx avec routage intelligent - -```bash -rtk npx [args...] -``` - -Route intelligemment vers les filtres specialises : -- `rtk npx tsc` -> filtre tsc -- `rtk npx eslint` -> filtre lint -- `rtk npx prisma` -> filtre prisma -- Autres -> passthrough filtre - ---- - -### `rtk pip` -- pip / uv - -```bash -rtk pip list # Liste des paquets (auto-detecte uv) -rtk pip outdated # Paquets obsoletes -rtk pip install # Installation -``` - -Auto-detecte `uv` si disponible et l'utilise a la place de `pip`. 
- ---- - -### `rtk deps` -- Resume des dependances - -**Objectif :** Resume compact des dependances du projet. - -```bash -rtk deps [chemin] # Defaut: repertoire courant -``` - -Auto-detecte : `Cargo.toml`, `package.json`, `pyproject.toml`, `go.mod`, `Gemfile`, etc. - -**Economies :** ~70% - ---- - -### `rtk prisma` -- ORM Prisma - -| Commande | Description | -|----------|-------------| -| `rtk prisma generate` | Generation du client (supprime l'ASCII art) | -| `rtk prisma migrate dev [--name N]` | Creer et appliquer une migration | -| `rtk prisma migrate status` | Status des migrations | -| `rtk prisma migrate deploy` | Deployer en production | -| `rtk prisma db-push` | Push du schema | - ---- - -## Conteneurs et orchestration - -### `rtk docker` -- Docker - -| Commande | Description | Economies | -|----------|-------------|-----------| -| `rtk docker ps` | Liste compacte des conteneurs | ~80% | -| `rtk docker images` | Liste compacte des images | ~80% | -| `rtk docker logs ` | Logs dedupliques | ~70% | -| `rtk docker compose ps` | Services Compose compacts | ~80% | -| `rtk docker compose logs [service]` | Logs Compose dedupliques | ~70% | -| `rtk docker compose build [service]` | Resume du build | ~60% | - -Les sous-commandes non reconnues sont transmises directement (passthrough). - -**Avant / Apres :** -``` -# docker ps (lignes longues, ~30 tokens/ligne) # rtk docker ps (~10 tokens/ligne) -CONTAINER ID IMAGE COMMAND ... web nginx:1.25 Up 2d (healthy) -abc123def456 nginx:1.25 "/dock..." ... db postgres:16 Up 2d (healthy) -789012345678 postgres:16 "docker..." 
redis redis:7 Up 1d -``` - ---- - -### `rtk kubectl` -- Kubernetes - -| Commande | Description | Options | -|----------|-------------|---------| -| `rtk kubectl pods [-n ns] [-A]` | Liste compacte des pods | Namespace ou tous | -| `rtk kubectl services [-n ns] [-A]` | Liste compacte des services | Namespace ou tous | -| `rtk kubectl logs [-c container]` | Logs dedupliques | Container specifique | - -Les sous-commandes non reconnues sont transmises directement (passthrough). - ---- - -## Donnees et reseau - -### `rtk json` -- Structure JSON - -**Objectif :** Affiche la structure d'un fichier JSON sans les valeurs. - -```bash -rtk json [--depth N] # Defaut: profondeur 5 -rtk json - # Depuis stdin -``` - -**Economies :** ~60% - -**Avant / Apres :** -``` -# cat package.json (50 lignes) # rtk json package.json (10 lignes) -{ { - "name": "my-app", name: string - "version": "1.0.0", version: string - "dependencies": { dependencies: { 15 keys } - "react": "^18.2.0", devDependencies: { 8 keys } - "next": "^14.0.0", scripts: { 6 keys } - ...15 dependances... } - }, - ... -} -``` - ---- - -### `rtk env` -- Variables d'environnement - -```bash -rtk env # Toutes les variables (sensibles masquees) -rtk env -f AWS # Filtrer par nom -rtk env --show-all # Inclure les valeurs sensibles -``` - -Les variables sensibles (tokens, secrets, mots de passe) sont masquees par defaut : `AWS_SECRET_ACCESS_KEY=***`. - ---- - -### `rtk log` -- Logs dedupliques - -**Objectif :** Filtre et deduplique la sortie de logs. - -```bash -rtk log # Depuis un fichier -rtk log # Depuis stdin (pipe) -``` - -Les lignes repetees sont fusionnees : `[ERROR] Connection refused (x42)`. - -**Economies :** ~60-80% (selon la repetitivite) - ---- - -### `rtk curl` -- HTTP avec detection JSON - -```bash -rtk curl [args...] -``` - -Auto-detecte les reponses JSON et affiche le schema au lieu du contenu complet. - ---- - -### `rtk wget` -- Telechargement compact - -```bash -rtk wget [args...] 
-rtk wget -O - # Sortie vers stdout -``` - -Supprime les barres de progression et le bruit. - ---- - -### `rtk summary` -- Resume heuristique - -**Objectif :** Execute une commande et genere un resume heuristique de la sortie. - -```bash -rtk summary -``` - -Utile pour les commandes longues dont la sortie n'a pas de filtre dedie. - ---- - -### `rtk proxy` -- Passthrough avec suivi - -**Objectif :** Execute une commande **sans filtrage** mais enregistre l'utilisation pour le suivi. - -```bash -rtk proxy -``` - -Utile pour le debug : comparer la sortie brute avec la sortie filtree. - ---- - -## Cloud et bases de donnees - -### `rtk aws` -- AWS CLI - -```bash -rtk aws [args...] -``` - -Force la sortie JSON et compresse le resultat. Supporte tous les services AWS (sts, s3, ec2, ecs, rds, cloudformation, etc.). - ---- - -### `rtk psql` -- PostgreSQL - -```bash -rtk psql [args...] -``` - -Supprime les bordures de tableaux et compresse la sortie. - ---- - -## Stacked PRs (Graphite) - -### `rtk gt` -- Graphite - -| Commande | Description | -|----------|-------------| -| `rtk gt log` | Stack log compact | -| `rtk gt submit` | Submit compact | -| `rtk gt sync` | Sync compact | -| `rtk gt restack` | Restack compact | -| `rtk gt create` | Create compact | -| `rtk gt branch` | Branch info compact | - -Les sous-commandes non reconnues sont transmises directement ou detectees comme passthrough git. 
- ---- - -## Analytique et suivi - -### Systeme de tracking - -RTK enregistre chaque execution de commande dans une base SQLite : - -- **Emplacement :** `~/.local/share/rtk/tracking.db` (Linux), `~/Library/Application Support/rtk/tracking.db` (macOS) -- **Retention :** 90 jours automatique -- **Metriques :** tokens entree/sortie, pourcentage d'economies, temps d'execution, projet - ---- - -### `rtk gain` -- Statistiques d'economies - -```bash -rtk gain # Resume global -rtk gain -p # Filtre par projet courant -rtk gain --graph # Graphe ASCII (30 derniers jours) -rtk gain --history # Historique recent des commandes -rtk gain --daily # Ventilation jour par jour -rtk gain --weekly # Ventilation par semaine -rtk gain --monthly # Ventilation par mois -rtk gain --all # Toutes les ventilations -rtk gain --quota -t pro # Estimation d'economies sur le quota mensuel -rtk gain --failures # Log des echecs de parsing (commandes en fallback) -rtk gain --format json # Export JSON (pour dashboards) -rtk gain --format csv # Export CSV -``` - -**Options :** - -| Option | Court | Description | -|--------|-------|-------------| -| `--project` | `-p` | Filtrer par repertoire courant | -| `--graph` | `-g` | Graphe ASCII des 30 derniers jours | -| `--history` | `-H` | Historique recent des commandes | -| `--quota` | `-q` | Estimation d'economies sur le quota mensuel | -| `--tier` | `-t` | Tier d'abonnement : `pro`, `5x`, `20x` (defaut: `20x`) | -| `--daily` | `-d` | Ventilation quotidienne | -| `--weekly` | `-w` | Ventilation hebdomadaire | -| `--monthly` | `-m` | Ventilation mensuelle | -| `--all` | `-a` | Toutes les ventilations | -| `--format` | `-f` | Format de sortie : `text`, `json`, `csv` | -| `--failures` | `-F` | Affiche les commandes en fallback | - -**Exemple de sortie :** -``` -$ rtk gain -RTK Token Savings Summary - Total commands: 1,247 - Total input: 2,341,000 tokens - Total output: 468,200 tokens - Total saved: 1,872,800 tokens (80%) - Avg per command: 1,501 tokens saved - 
-Top commands: - git status 312x -82% - cargo test 156x -91% - git diff 98x -76% -``` - ---- - -### `rtk discover` -- Opportunites manquees - -**Objectif :** Analyse l'historique Claude Code pour trouver les commandes qui auraient pu etre optimisees par rtk. - -```bash -rtk discover # Projet courant, 30 derniers jours -rtk discover --all --since 7 # Tous les projets, 7 derniers jours -rtk discover -p /chemin/projet # Filtrer par projet -rtk discover --limit 20 # Max commandes par section -rtk discover --format json # Export JSON -``` - -**Options :** - -| Option | Court | Description | -|--------|-------|-------------| -| `--project` | `-p` | Filtrer par chemin de projet | -| `--limit` | `-l` | Max commandes par section (defaut: 15) | -| `--all` | `-a` | Scanner tous les projets | -| `--since` | `-s` | Derniers N jours (defaut: 30) | -| `--format` | `-f` | Format : `text`, `json` | - ---- - -### `rtk learn` -- Apprendre des erreurs - -**Objectif :** Analyse l'historique d'erreurs CLI de Claude Code pour detecter les corrections recurrentes. - -```bash -rtk learn # Projet courant -rtk learn --all --since 7 # Tous les projets -rtk learn --write-rules # Generer .claude/rules/cli-corrections.md -rtk learn --min-confidence 0.8 # Seuil de confiance (defaut: 0.6) -rtk learn --min-occurrences 3 # Occurrences minimales (defaut: 1) -rtk learn --format json # Export JSON -``` - ---- - -### `rtk cc-economics` -- Analyse economique Claude Code - -**Objectif :** Compare les depenses Claude Code (via ccusage) avec les economies RTK. - -```bash -rtk cc-economics # Resume -rtk cc-economics --daily # Ventilation quotidienne -rtk cc-economics --weekly # Ventilation hebdomadaire -rtk cc-economics --monthly # Ventilation mensuelle -rtk cc-economics --all # Toutes les ventilations -rtk cc-economics --format json # Export JSON -``` - ---- - -### `rtk hook-audit` -- Metriques du hook - -**Prerequis :** Necessite `RTK_HOOK_AUDIT=1` dans l'environnement. 
- -```bash -rtk hook-audit # 7 derniers jours (defaut) -rtk hook-audit --since 30 # 30 derniers jours -rtk hook-audit --since 0 # Tout l'historique -``` - ---- - -## Systeme de hooks - -### Fonctionnement - -Le hook RTK intercepte les commandes Bash dans Claude Code **avant leur execution** et les reecrit automatiquement en equivalent RTK. - -**Flux :** -``` -Claude Code "git status" - | - v -settings.json -> PreToolUse hook - | - v -rtk-rewrite.sh (bash) - | - v -rtk rewrite "git status" -> "rtk git status" - | - v -Claude Code execute "rtk git status" - | - v -Sortie filtree retournee a Claude (~10 tokens vs ~200) -``` - -**Points cles :** -- Claude ne voit jamais la recriture -- il recoit simplement une sortie optimisee -- Le hook est un delegateur leger (~50 lignes bash) qui appelle `rtk rewrite` -- Toute la logique de recriture est dans le registre Rust (`src/discover/registry.rs`) -- Les commandes deja prefixees par `rtk` passent sans modification -- Les heredocs (`<<`) ne sont pas modifies -- Les commandes non reconnues passent sans modification - -### Installation - -```bash -rtk init -g # Installation recommandee (hook + RTK.md) -rtk init -g --auto-patch # Non-interactif (CI/CD) -rtk init -g --hook-only # Hook seul, sans RTK.md -rtk init --show # Verifier l'installation -rtk init -g --uninstall # Desinstaller -``` - -### Fichiers installes - -| Fichier | Description | -|---------|-------------| -| `~/.claude/hooks/rtk-rewrite.sh` | Script hook (delegue a `rtk rewrite`) | -| `~/.claude/RTK.md` | Instructions minimales pour le LLM | -| `~/.claude/settings.json` | Enregistrement du hook PreToolUse | - -### `rtk rewrite` -- Recriture de commande - -Commande interne utilisee par le hook. Imprime la commande reecrite sur stdout (exit 0) ou sort avec exit 1 si aucun equivalent RTK n'existe. 
- -```bash -rtk rewrite "git status" # -> "rtk git status" (exit 0) -rtk rewrite "terraform plan" # -> (exit 1, pas de recriture) -rtk rewrite "rtk git status" # -> "rtk git status" (exit 0, inchange) -``` - -### `rtk verify` -- Verification d'integrite - -Verifie l'integrite du hook installe via un controle SHA-256. - -```bash -rtk verify -``` - -### Commandes reecrites automatiquement - -| Commande brute | Reecrite en | -|----------------|-------------| -| `git status/diff/log/add/commit/push/pull` | `rtk git ...` | -| `gh pr/issue/run` | `rtk gh ...` | -| `cargo test/build/clippy/check` | `rtk cargo ...` | -| `cat/head/tail ` | `rtk read ` | -| `rg/grep ` | `rtk grep ` | -| `ls` | `rtk ls` | -| `tree` | `rtk tree` | -| `wc` | `rtk wc` | -| `vitest/jest` | `rtk vitest run` | -| `tsc` | `rtk tsc` | -| `eslint/biome` | `rtk lint` | -| `prettier` | `rtk prettier` | -| `playwright` | `rtk playwright` | -| `prisma` | `rtk prisma` | -| `ruff check/format` | `rtk ruff ...` | -| `pytest` | `rtk pytest` | -| `mypy` | `rtk mypy` | -| `pip list/install` | `rtk pip ...` | -| `go test/build/vet` | `rtk go ...` | -| `golangci-lint` | `rtk golangci-lint` | -| `docker ps/images/logs` | `rtk docker ...` | -| `kubectl get/logs` | `rtk kubectl ...` | -| `curl` | `rtk curl` | -| `pnpm list/outdated` | `rtk pnpm ...` | - -### Exclusion de commandes - -Pour empecher certaines commandes d'etre reecrites, ajoutez-les dans `config.toml` : - -```toml -[hooks] -exclude_commands = ["curl", "playwright"] -``` - ---- - -## Configuration - -### Fichier de configuration - -**Emplacement :** `~/.config/rtk/config.toml` (Linux) ou `~/Library/Application Support/rtk/config.toml` (macOS) - -**Commandes :** -```bash -rtk config # Afficher la configuration actuelle -rtk config --create # Creer le fichier avec les valeurs par defaut -``` - -### Structure complete - -```toml -[tracking] -enabled = true # Activer/desactiver le suivi -history_days = 90 # Jours de retention (nettoyage automatique) 
-database_path = "/custom/path/tracking.db" # Chemin personnalise (optionnel) - -[display] -colors = true # Sortie coloree -emoji = true # Utiliser les emojis -max_width = 120 # Largeur maximale de sortie - -[filters] -ignore_dirs = [".git", "node_modules", "target", "__pycache__", ".venv", "vendor"] -ignore_files = ["*.lock", "*.min.js", "*.min.css"] - -[tee] -enabled = true # Activer la sauvegarde de sortie brute -mode = "failures" # "failures" (defaut), "always", ou "never" -max_files = 20 # Rotation : garder les N derniers fichiers -# directory = "/custom/tee/path" # Chemin personnalise (optionnel) - -[telemetry] -enabled = true # Telemetrie anonyme (1 ping/jour, opt-out possible) - -[hooks] -exclude_commands = [] # Commandes a exclure de la recriture automatique -``` - -### Variables d'environnement - -| Variable | Description | -|----------|-------------| -| `RTK_TEE_DIR` | Surcharge le repertoire tee | -| `RTK_TELEMETRY_DISABLED=1` | Desactiver la telemetrie | -| `RTK_HOOK_AUDIT=1` | Activer l'audit du hook | -| `SKIP_ENV_VALIDATION=1` | Desactiver la validation d'env (Next.js, etc.) | - ---- - -## Systeme Tee - -### Recuperation de sortie brute - -Quand une commande echoue, RTK sauvegarde automatiquement la sortie brute complete dans un fichier log. Cela permet au LLM de lire la sortie sans re-executer la commande. - -**Fonctionnement :** -1. La commande echoue (exit code != 0) -2. RTK sauvegarde la sortie brute dans `~/.local/share/rtk/tee/` -3. Le chemin du fichier est affiche dans la sortie filtree -4. 
Le LLM peut lire le fichier si besoin de plus de details - -**Sortie :** -``` -FAILED: 2/15 tests -[full output: ~/.local/share/rtk/tee/1707753600_cargo_test.log] -``` - -**Configuration :** - -| Parametre | Defaut | Description | -|-----------|--------|-------------| -| `tee.enabled` | `true` | Activer/desactiver | -| `tee.mode` | `"failures"` | `"failures"`, `"always"`, `"never"` | -| `tee.max_files` | `20` | Rotation : garder les N derniers | -| Taille min | 500 octets | Les sorties trop courtes ne sont pas sauvegardees | -| Taille max fichier | 1 Mo | Troncature au-dela | - ---- - -## Telemetrie - -RTK envoie un ping anonyme une fois par jour (23h d'intervalle) pour des statistiques d'utilisation. - -**Donnees envoyees :** hash de device, version, OS, architecture, nombre de commandes/24h, top commandes, pourcentage d'economies. - -**Desactiver :** -```bash -# Via variable d'environnement -export RTK_TELEMETRY_DISABLED=1 - -# Via config.toml -[telemetry] -enabled = false -``` - -Aucune donnee personnelle, aucun contenu de commande, aucun chemin de fichier n'est transmis. - ---- - -## Resume des economies par categorie - -| Categorie | Commandes | Economies typiques | -|-----------|-----------|-------------------| -| **Fichiers** | ls, tree, read, find, grep, diff | 60-80% | -| **Git** | status, log, diff, show, add, commit, push, pull | 75-92% | -| **GitHub** | pr, issue, run, api | 26-87% | -| **Tests** | cargo test, vitest, playwright, pytest, go test | 90-99% | -| **Build/Lint** | cargo build, tsc, eslint, prettier, next, ruff, clippy | 70-87% | -| **Paquets** | pnpm, npm, pip, deps, prisma | 60-80% | -| **Conteneurs** | docker, kubectl | 70-80% | -| **Donnees** | json, env, log, curl, wget | 60-80% | -| **Analytique** | gain, discover, learn, cc-economics | N/A (meta) | - ---- - -## Nombre total de commandes - -RTK supporte **45+ commandes** reparties en 9 categories, avec passthrough automatique pour les sous-commandes non reconnues. 
Cela en fait un proxy universel : il est toujours sur a utiliser en prefixe. diff --git a/docs/TECHNICAL.md b/docs/TECHNICAL.md deleted file mode 100644 index 04522683d..000000000 --- a/docs/TECHNICAL.md +++ /dev/null @@ -1,432 +0,0 @@ -# RTK Technical Documentation - -> **Start here** for a guided tour of how RTK works end-to-end. -> -> - [CONTRIBUTING.md](../CONTRIBUTING.md) — Design philosophy, PR process, branch naming, testing requirements -> - [ARCHITECTURE.md](../ARCHITECTURE.md) — Deep reference: filtering taxonomy, performance benchmarks, architecture decisions -> - Each folder has its own `README.md` with implementation details and file descriptions - ---- - -## 1. Project Vision - -LLM-powered coding agents (Claude Code, Copilot, Cursor, etc.) consume tokens for every CLI command output they process. Most command outputs contain boilerplate, progress bars, ANSI escape codes, and verbose formatting that wastes tokens without providing actionable information. - -RTK sits between the agent and the CLI, filtering outputs to keep only what matters. This achieves 60-90% token savings per command, reducing costs and increasing effective context window utilization. RTK is a single Rust binary with no runtime dependencies beyond the compiled binary itself, adding less than 10ms overhead per command. - ---- - -## 2. 
Architecture Overview - -``` -User / LLM Agent - | - v -+--------------------------------------------------+ -| LLM Agent Hook | -| hooks/{claude,copilot,cursor,...}/ | -| Intercepts: "git status" -> "rtk git status" | -+-------------------------+------------------------+ - | - v -+--------------------------------------------------+ -| RTK CLI (main.rs) | -| | -| +-------------+ +-----------------+ | -| | Clap Parser | -> | Command Routing | | -| | (Commands | | (match on enum) | | -| | enum) | +--------+--------+ | -| +-------------+ | | -| +---------+---------+ | -| v v v | -| +----------+ +--------+ +----------+| -| |Rust Filter| |TOML DSL| |Passthru || -| |(cmds/**) | |Filter | |(fallback)|| -| +-----+----+ +----+---+ +----+-----+| -| | | | | -| +-----+-----+-----------+ | -| v | -| +---------------------+ | -| | Token Tracking | | -| | (core/tracking) | | -| | SQLite DB | | -| +---------------------+ | -+--------------------------------------------------+ -``` - -**Design principles:** -- Single-threaded, no async (startup < 10ms) -- Graceful degradation: filter failure falls back to raw output -- Exit code propagation: RTK never swallows non-zero exits -- Transparent proxy: unknown commands pass through unchanged - ---- - -## 3. End-to-End Flow - -This is the full lifecycle of a command through RTK, from LLM agent to filtered output. - -### 3.1 Hook Installation (`rtk init`) - -The user runs `rtk init` to set up hooks for their LLM agent. This: - -1. Writes a thin shell hook script (e.g., `~/.claude/hooks/rtk-rewrite.sh`) -2. Stores its SHA-256 hash for integrity verification -3. Patches the agent's settings file (e.g., `settings.json`) to register the hook -4. Writes RTK awareness instructions (e.g., `RTK.md`) for prompt-level guidance - -RTK supports 7 agents, each with its own installation mode. The hook scripts are embedded in the binary and written at install time. 
- -> **Details**: [`src/hooks/README.md`](../src/hooks/README.md) covers all installation modes, configuration files, and the uninstall flow. - -### 3.2 Hook Interception (Command Rewriting) - -When an LLM agent runs a command (e.g., `git status`): - -1. The agent fires a `PreToolUse` event (or equivalent) containing the command as JSON -2. The hook script reads the JSON, extracts the command string -3. The hook calls `rtk rewrite "git status"` as a subprocess -4. `rtk rewrite` consults the command registry and returns `rtk git status` -5. The hook sends a response telling the agent to use the rewritten command -6. If anything fails (jq missing, rtk not found, no match), the hook exits silently -- the raw command runs unchanged - -All rewrite logic lives in Rust (`src/discover/registry.rs`). Hooks are thin delegates that handle agent-specific JSON formats. - -> **Details**: [`hooks/README.md`](../hooks/README.md) covers each agent's JSON format, the rewrite registry, compound command handling, and the `RTK_DISABLED` override. - -#### Rewrite Pipeline - -The rewrite pipeline is how RTK intercepts and rewrites commands. The call chain is: - -``` -hook shell → rewrite_cmd.rs → rewrite_command() → rewrite_compound() → rewrite_segment() → classify_command() -``` - -Traced step by step for `cargo fmt --all && cargo test 2>&1 | tail -20`: - -``` -LLM Agent: "cargo fmt --all && cargo test 2>&1 | tail -20" - | - | Hook shell (hooks/claude/rtk-rewrite.sh) - | Reads JSON from agent, extracts command, calls `rtk rewrite "$CMD"` - | On failure (jq missing, rtk missing, old version): exit 0 (passthrough) - | - v -rewrite_cmd::run(cmd) [src/hooks/rewrite_cmd.rs] - | 1. Load config → hooks.exclude_commands - | 2. check_command(cmd) → Deny → exit(2) - | 3. 
registry::rewrite_command(cmd, excluded) - | → None → exit(1) (no RTK equivalent, passthrough) - | → Some + Allow → print, exit(0) - | → Some + Ask → print, exit(3) - | - v -rewrite_command(cmd, excluded) [src/discover/registry.rs] - | Early exits: - | - Empty → None - | - Contains "<<" or "$((" (heredoc/arithmetic) → None - | - Simple "rtk ..." (no operators) → return as-is - | - Otherwise → rewrite_compound(cmd, excluded) - | - v -rewrite_compound(cmd, excluded) [src/discover/registry.rs] - | - | Step 1 — Tokenize (lexer.rs) - | tokenize() produces typed tokens with byte offsets: - | Arg("cargo") Arg("fmt") Arg("--all") - | Operator("&&") - | Arg("cargo") Arg("test") Redirect("2>&1") - | Pipe("|") - | Arg("tail") Arg("-20") - | - | Step 2 — Split on operators, rewrite each segment - | Operator (&&, ||, ;) → rewrite both sides - | Pipe (|) → rewrite left side only, keep right side raw - | exception: find/fd before pipe → skip rewrite - | Shellism (&) → rewrite both sides (background) - | - | Calls rewrite_segment() per segment: - | segment 1: "cargo fmt --all" - | segment 2: "cargo test 2>&1" - | after pipe: "tail -20" kept raw - | - v -rewrite_segment(seg, excluded) [src/discover/registry.rs] - | - | Step 3 — Strip trailing redirects - | strip_trailing_redirects() re-tokenizes the segment: - | "cargo test 2>&1" → cmd_part="cargo test", redirect=" 2>&1" - | (simple commands like "cargo fmt --all" → no redirect, suffix is "") - | - | Step 4 — Already RTK → return as-is - | - | Step 5 — Special cases (short-circuit before classification) - | head -N / --lines=N → rewrite_line_range() → "rtk read file --max-lines N" - | tail -N / -n N / --lines N → rewrite_line_range() → "rtk read file --tail-lines N" - | head/tail with unsupported flag (-c, -f) → None (skip rewrite) - | cat with incompatible flag (-A, -v, -e) → None (skip rewrite) - | - | Step 6 — classify_command(cmd_part) [see below] - | → Supported → check excluded list → continue - | → Unsupported/Ignored → None 
(skip rewrite) - | - | Step 7 — Build rewritten command - | a. Find matching rule from rules.rs - | b. Extract env prefix (ENV_PREFIX regex, second pass — first was in classify) - | e.g. "GIT_SSH_COMMAND=\"ssh -o ...\" git push" → prefix="GIT_SSH_COMMAND=..." - | c. Guard: RTK_DISABLED=1 in prefix → None - | d. Guard: gh with --json/--jq/--template → None - | e. Apply rule's rewrite_prefixes: "cargo fmt" → "rtk cargo fmt" - | f. Reassemble: env_prefix + rtk_cmd + args + redirect_suffix - | - v -classify_command(cmd) [src/discover/registry.rs] - | 1. Check IGNORED_EXACT (cd, echo, fi, done, ...) - | 2. Check IGNORED_PREFIXES (rtk, mkdir, mv, ...) - | 3. Strip env prefix with ENV_PREFIX regex (for pattern matching only) - | 4. Normalize absolute paths: /usr/bin/grep → grep - | 5. Strip git global opts: git -C /tmp status → git status - | 6. Guard: cat/head/tail with redirect (>, >>) → Unsupported (write, not read) - | 7. Match against REGEX_SET (60+ compiled patterns from rules.rs) - | 8. Extract subcommand → lookup custom savings/status overrides - | 9. Return Classification::Supported { rtk_equivalent, category, savings, status } - | - v -Result: "rtk cargo fmt --all && rtk cargo test 2>&1 | tail -20" - | - | Hook response - | Hook wraps result in agent-specific JSON, returns to LLM agent - | - v -LLM Agent executes rewritten command - (bash handles && and |, each rtk invocation is a separate process) -``` - -Key design decisions: -- **Lexer-based tokenization**: A single-pass state machine (`lexer.rs`) handles all shell constructs (quotes, escapes, redirects, operators). Used for both compound splitting and redirect stripping. -- **Segment-level rewriting**: Compound commands are split by operators, each segment rewritten independently. Bash recombines them at execution time. -- **Pipe semantics**: Only the left side of `|` is rewritten. The pipe consumer (grep, head, wc) runs raw. 
`find`/`fd` before a pipe is never rewritten (output format incompatible with xargs). -- **Double env prefix handling**: `classify_command()` strips env prefixes to match the underlying command against rules. `rewrite_segment()` extracts the same prefix separately to re-prepend it to the rewritten command. -- **Fallback contract**: If any segment fails to match, it stays raw. `rewrite_command()` returns `None` only when zero segments were rewritten. - -### 3.3 CLI Parsing and Routing - -Once the rewritten command reaches RTK: - -1. **Telemetry**: `telemetry::maybe_ping()` fires a non-blocking daily usage ping -2. **Clap parsing**: `Cli::try_parse()` matches against the `Commands` enum -3. **Hook check**: `hook_check::maybe_warn()` warns if the installed hook is outdated (rate-limited to 1/day) -4. **Integrity check**: `integrity::runtime_check()` verifies the hook's SHA-256 hash for operational commands -5. **Routing**: A `match cli.command` dispatches to the specialized filter module - -If Clap parsing fails (command not in the enum), the fallback path runs instead. - -### 3.4 Filter Execution - -RTK has two filter systems: - -**Rust Filters**: Compiled modules in `src/cmds/` that execute the command, parse its output, and apply specialized transformations (regex, JSON, state machines). - -**TOML DSL Filters**: Declarative filters in `src/filters/*.toml` that apply regex-based line filtering, truncation, and section extraction. Applied in `run_fallback()` when no Rust filter matches. - -Each filter module follows the same pattern: -1. Start a timer (`TimedExecution::start()`) -2. Execute the underlying command (`std::process::Command`) -3. Apply filtering (strip boilerplate, group errors, truncate) -4. On filter error, fall back to raw output -5. Track token savings to SQLite -6. 
Propagate exit code - -> **Details**: [`src/cmds/README.md`](../src/cmds/README.md) covers the common pattern, ecosystem organization, cross-command dependencies, and how to add new filters. - -### 3.5 Fallback Path - -When Clap parsing fails (unknown command): - -1. Guard: check if the command is an RTK meta-command (`gain`, `init`, etc.) -- if so, show Clap error -2. Look up TOML DSL filters via `toml_filter::find_matching_filter()` -3. If TOML match: capture stdout, apply filter pipeline, track savings -4. If no match: pure passthrough with `Stdio::inherit`, track as 0% savings - -``` -Command received - -> Clap parse succeeds? - -> Yes: Route to Rust filter module - -> No: run_fallback() - -> TOML filter match? - -> Yes: Capture stdout, apply filter, track savings - -> No: Passthrough (inherit stdio, track 0% savings) -``` - -> **Details**: [`src/core/README.md`](../src/core/README.md) covers the TOML filter engine, filter pipeline stages, and trust-gated project filters. - -### 3.6 Token Tracking - -Every command execution records metrics to SQLite (`~/.local/share/rtk/tracking.db`): - -- Input tokens (raw output size) and output tokens (filtered size) -- Savings percentage, execution time, project path -- 90-day automatic retention cleanup -- Token estimation: `ceil(chars / 4.0)` approximation - -Analytics commands (`rtk gain`, `rtk cc-economics`, `rtk session`) query this database to produce dashboards and ROI reports. - -> **Details**: [`src/analytics/README.md`](../src/analytics/README.md) covers the analytics modules, and [`src/core/README.md`](../src/core/README.md) covers the tracking database schema. - -### 3.7 Tee Recovery - -On command failure (non-zero exit code): - -1. Raw unfiltered output is saved to `~/.local/share/rtk/tee/{epoch}_{slug}.log` -2. A hint line is printed: `[full output: ~/.../tee/1234_cargo_test.log]` -3. 
LLM agents can re-read the file instead of re-running the failed command - -Tee is configurable (enabled/disabled, min size, max files, max file size) and never affects command output or exit code on failure. - -> **Details**: [`src/core/README.md`](../src/core/README.md) covers tee configuration and the rotation strategy. - ---- - -## 4. Folder Map - -Start here, then drill down into each README for file-level details. - -### `src/` — Rust source code - -| Directory | What it does | What you'll find in its README | -|-----------|-------------|-------------------------------| -| `main.rs` | CLI entry point, `Commands` enum, routing match | _(no README — read the file directly)_ | -| [`core/`](../src/core/README.md) | Shared infrastructure | Tracking DB schema, config system, tee recovery, TOML filter engine, utility functions | -| [`hooks/`](../src/hooks/README.md) | Hook system | Installation flow (`rtk init`), integrity verification, rewrite command, trust model | -| [`analytics/`](../src/analytics/README.md) | Token savings analytics | `rtk gain` dashboard, Claude Code economics, ccusage parsing | -| [`cmds/`](../src/cmds/README.md) | **Command filters (9 ecosystems)** | Common filter pattern, cross-command routing, token savings table, **links to each ecosystem** | -| [`discover/`](../src/discover/README.md) | History analysis + rewrite registry | Rewrite patterns, session providers, compound command splitting | -| [`learn/`](../src/learn/README.md) | CLI correction detection | Error classification, correction pair detection, rule generation | -| [`parser/`](../src/parser/README.md) | Parser infrastructure | Canonical types (TestResult, LintResult, etc.), 3-tier format modes, migration guide | -| [`filters/`](../src/filters/README.md) | TOML filter configs | TOML DSL syntax, 8-stage pipeline, inline testing, naming conventions | - -### `hooks/` — Deployed hook artifacts (root directory) - -| Directory | Agent | What you'll find in its README | 
-|-----------|-------|-------------------------------| -| [`hooks/`](../hooks/README.md) | _(parent)_ | **All JSON formats**, rewrite registry overview, exit code contract, override controls | -| [`claude/`](../hooks/claude/README.md) | Claude Code | Shell hook mechanism, `PreToolUse` JSON, test script | -| [`copilot/`](../hooks/copilot/README.md) | GitHub Copilot | Rust binary hook, VS Code Chat vs Copilot CLI dual format | -| [`cursor/`](../hooks/cursor/README.md) | Cursor IDE | Shell hook, empty JSON response requirement | -| [`cline/`](../hooks/cline/README.md) | Cline / Roo Code | Rules file (prompt-level, no programmatic hook) | -| [`windsurf/`](../hooks/windsurf/README.md) | Windsurf / Cascade | Rules file (workspace-scoped) | -| [`codex/`](../hooks/codex/README.md) | OpenAI Codex CLI | Awareness document, AGENTS.md integration | -| [`opencode/`](../hooks/opencode/README.md) | OpenCode | TypeScript plugin, zx library, in-place mutation | - ---- - -## 5. Hook System Summary - -RTK supports the following LLM agents through hook integrations: - -| Agent | Hook Type | Mechanism | Can Modify Command? 
| -|-------|-----------|-----------|---------------------| -| Claude Code | Shell hook | `PreToolUse` in `settings.json` | Yes (`updatedInput`) | -| GitHub Copilot (VS Code) | Rust binary | `rtk hook copilot` reads JSON | Yes (`updatedInput`) | -| GitHub Copilot CLI | Rust binary | `rtk hook copilot` reads JSON | No (deny + suggestion) | -| Cursor | Shell hook | `preToolUse` hook | Yes (`updated_input`) | -| Gemini CLI | Rust binary | `rtk hook gemini` reads JSON | Yes (`hookSpecificOutput`) | -| Cline/Roo Code | Rules file | Prompt-level guidance | N/A (prompt) | -| Windsurf | Rules file | Prompt-level guidance | N/A (prompt) | -| Codex CLI | Awareness doc | AGENTS.md integration | N/A (prompt) | -| OpenCode | TS plugin | `tool.execute.before` event | Yes (in-place mutation) | - -> **Details**: [`hooks/README.md`](../hooks/README.md) has the full JSON schemas for each agent. [`src/hooks/README.md`](../src/hooks/README.md) covers installation, integrity verification, and the rewrite command. - ---- - -## 6. Filter Pipeline Summary - -### Rust Filters (cmds/**) - -Compiled filter modules for complex transformations with 60-95% token savings. - -> **Details**: [`src/cmds/README.md`](../src/cmds/README.md) and each ecosystem subdirectory README. - -### TOML DSL Filters (src/filters/*.toml) - -Declarative filters with an 8-stage pipeline: strip ANSI, regex replace, match output, strip/keep lines, truncate lines, head/tail, max lines, on-empty message. Loaded from three tiers: built-in (compiled), global (`~/.config/rtk/filters/`), project-local (`.rtk/filters/`, trust-gated). - -> **Details**: [`src/core/README.md`](../src/core/README.md) covers the TOML filter engine. - ---- - -## 7. 
Performance Constraints - -| Metric | Target | Verification | -|--------|--------|--------------| -| Startup time | < 10ms | `hyperfine 'rtk git status' 'git status'` | -| Memory usage | < 5MB resident | `/usr/bin/time -v rtk git status` | -| Binary size | < 5MB stripped | `ls -lh target/release/rtk` | -| Token savings | 60-90% per filter | Snapshot + token count tests | - -Achieved through: -- Zero async overhead (single-threaded, no tokio) -- Lazy regex compilation (`lazy_static!`) -- Minimal allocations (borrow over clone) -- No config file I/O on startup (loaded on-demand) - ---- - -## 8. Testing - -Tests live **in the module file itself** inside a `#[cfg(test)] mod tests` block (e.g., tests for `src/cmds/cloud/container.rs` go at the bottom of that same file). - -### How to Write Tests - -**1. Create a fixture from real command output** (not synthetic data): -```bash -kubectl get pods > tests/fixtures/kubectl_pods_raw.txt -``` - -**2. Write your test in the same module file** (`#[cfg(test)] mod tests`): -```rust -#[test] -fn test_my_filter() { - let input = include_str!("../tests/fixtures/my_cmd_raw.txt"); - let output = filter_my_cmd(input); - assert!(output.contains("expected content")); - assert!(!output.contains("noise line")); -} -``` - -**3. 
Verify token savings** (60% minimum required): -```rust -#[test] -fn test_my_filter_savings() { - let input = include_str!("../tests/fixtures/my_cmd_raw.txt"); - let output = filter_my_cmd(input); - let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(input) as f64 * 100.0); - assert!(savings >= 60.0, "Expected >=60% savings, got {:.1}%", savings); -} -``` - -### Test Organization - -``` -tests/ -├── fixtures/ # Real command output (never synthetic) -│ ├── git_log_raw.txt -│ ├── cargo_test_raw.txt -│ └── dotnet/ # Ecosystem-specific fixtures -└── integration_test.rs # Integration tests (#[ignore]) -``` - -- **Unit tests**: `#[cfg(test)] mod tests` embedded in each module -- **Fixtures**: real command output in `tests/fixtures/` -- **Integration tests**: `#[ignore]` attribute, run with `cargo test --ignored` - -> For testing requirements, pre-commit gate, and PR checklist, see [CONTRIBUTING.md — Testing](../CONTRIBUTING.md#testing). - ---- - -## 9. Future Improvements - -- **Extract cli.rs**: Move `Commands` enum, 13 sub-enums (`GitCommands`, `CargoCommands`, etc.), and `AgentTarget` from main.rs to a dedicated cli.rs module. This would reduce main.rs from ~2600 to ~1500 lines. -- **Split routing**: Extract the `match cli.command { ... }` block into a separate routing module. -- **Streaming filters**: For long-running commands, filter output line-by-line as it arrives instead of buffering. diff --git a/docs/TROUBLESHOOTING.md b/docs/TROUBLESHOOTING.md deleted file mode 100644 index cf52f026d..000000000 --- a/docs/TROUBLESHOOTING.md +++ /dev/null @@ -1,337 +0,0 @@ -# RTK Troubleshooting Guide - -## Problem: "rtk gain" command not found - -### Symptom -```bash -$ rtk --version -rtk 1.0.0 # (or similar) - -$ rtk gain -rtk: 'gain' is not a rtk command. See 'rtk --help'. -``` - -### Root Cause -You installed the **wrong rtk package**. You have **Rust Type Kit** (reachingforthejack/rtk) instead of **Rust Token Killer** (rtk-ai/rtk). - -### Solution - -**1. 
Uninstall the wrong package:** -```bash -cargo uninstall rtk -``` - -**2. Install the correct one (Token Killer):** - -#### Quick Install (Linux/macOS) -```bash -curl -fsSL https://github.com/rtk-ai/rtk/blob/master/install.sh | sh -``` - -#### Alternative: Manual Installation -```bash -cargo install --git https://github.com/rtk-ai/rtk -``` - -**3. Verify installation:** -```bash -rtk --version -rtk gain # MUST show token savings stats, not error -``` - -If `rtk gain` now works, installation is correct. - ---- - -## Problem: Confusion Between Two "rtk" Projects - -### The Two Projects - -| Project | Repository | Purpose | Key Command | -|---------|-----------|---------|-------------| -| **Rust Token Killer** ✅ | rtk-ai/rtk | LLM token optimizer for Claude Code | `rtk gain` | -| **Rust Type Kit** ❌ | reachingforthejack/rtk | Rust codebase query and type generator | `rtk query` | - -### How to Identify Which One You Have - -```bash -# Check if "gain" command exists -rtk gain - -# Token Killer → Shows token savings stats -# Type Kit → Error: "gain is not a rtk command" -``` - ---- - -## Problem: cargo install rtk installs wrong package - -### Why This Happens -If **Rust Type Kit** is published to crates.io under the name `rtk`, running `cargo install rtk` will install the wrong package. - -### Solution -**NEVER use** `cargo install rtk` without verifying. - -**Always use explicit repository URLs:** - -```bash -# CORRECT - Token Killer -cargo install --git https://github.com/rtk-ai/rtk - -# OR install from fork -git clone https://github.com/rtk-ai/rtk.git -cd rtk && git checkout feat/all-features -cargo install --path . --force -``` - -**After any installation, ALWAYS verify:** -```bash -rtk gain # Must work if you want Token Killer -``` - ---- - -## Problem: RTK not working in Claude Code - -### Symptom -Claude Code doesn't seem to be using rtk, outputs are verbose. - -### Checklist - -**1. 
Verify rtk is installed and correct:** -```bash -rtk --version -rtk gain # Must show stats -``` - -**2. Initialize rtk for Claude Code:** -```bash -# Global (all projects) -rtk init --global - -# Per-project -cd /your/project -rtk init -``` - -**3. Verify CLAUDE.md file exists:** -```bash -# Check global -cat ~/.claude/CLAUDE.md | grep rtk - -# Check project -cat ./CLAUDE.md | grep rtk -``` - -**4. Install auto-rewrite hook (recommended for automatic RTK usage):** - -**Option A: Automatic (recommended)** -```bash -rtk init -g -# → Installs hook + RTK.md automatically -# → Follow printed instructions to add hook to ~/.claude/settings.json -# → Restart Claude Code - -# Verify installation -rtk init --show # Should show "✅ Hook: executable, with guards" -``` - -**Option B: Manual (fallback)** -```bash -# Copy hook to Claude Code hooks directory -mkdir -p ~/.claude/hooks -cp .claude/hooks/rtk-rewrite.sh ~/.claude/hooks/ -chmod +x ~/.claude/hooks/rtk-rewrite.sh -``` - -Then add to `~/.claude/settings.json` (replace `~` with full path): -```json -{ - "hooks": { - "PreToolUse": [ - { - "matcher": "Bash", - "hooks": [ - { - "type": "command", - "command": "/Users/yourname/.claude/hooks/rtk-rewrite.sh" - } - ] - } - ] - } -} -``` - -**Note**: Use absolute path in `settings.json`, not `~/.claude/...` - ---- - -## Problem: RTK not working in OpenCode - -### Symptom -OpenCode runs commands without rtk, outputs are verbose. - -### Checklist - -**1. Verify rtk is installed and correct:** -```bash -rtk --version -rtk gain # Must show stats -``` - -**2. Install the OpenCode plugin (global only):** -```bash -rtk init -g --opencode -``` - -**3. Verify plugin file exists:** -```bash -ls -la ~/.config/opencode/plugins/rtk.ts -``` - -**4. Restart OpenCode** -OpenCode must be restarted to load the plugin. - -**5. 
Verify status:** -```bash -rtk init --show # Should show "OpenCode: plugin installed" -``` - ---- - -## Problem: RTK commands fail on Windows ("program not found" or "No such file") - -### Symptom -``` -rtk vitest --run -# Error: program not found -# Or: The system cannot find the file specified - -rtk lint . -# Error: No such file or directory -``` - -### Root Cause -On Windows, Node.js tools (vitest, eslint, tsc, etc.) are installed as `.CMD` or `.BAT` wrapper scripts, not as native `.exe` binaries. Rust's `std::process::Command::new("vitest")` does not honor the Windows `PATHEXT` environment variable, so it cannot find `vitest.CMD` even when it's on PATH. - -### Solution -Update to rtk v0.23.1+ which resolves this via the `which` crate for proper PATH+PATHEXT resolution. All 16+ command modules now use `resolved_command()` instead of `Command::new()`. - -```bash -cargo install --git https://github.com/rtk-ai/rtk -rtk --version # Should be 0.23.1+ -``` - -### Affected Commands -All commands that spawn external tools: `rtk vitest`, `rtk lint`, `rtk tsc`, `rtk pnpm`, `rtk playwright`, `rtk prisma`, `rtk next`, `rtk prettier`, `rtk ruff`, `rtk pytest`, `rtk pip`, `rtk mypy`, `rtk golangci-lint`, and others. - ---- - -## Problem: "command not found: rtk" after installation - -### Symptom -```bash -$ cargo install --path . --force - Compiling rtk v0.7.1 - Finished release [optimized] target(s) - Installing ~/.cargo/bin/rtk - -$ rtk --version -zsh: command not found: rtk -``` - -### Root Cause -`~/.cargo/bin` is not in your PATH. - -### Solution - -**1. Check if cargo bin is in PATH:** -```bash -echo $PATH | grep -o '[^:]*\.cargo[^:]*' -``` - -**2. If not found, add to PATH:** - -For **bash** (`~/.bashrc`): -```bash -export PATH="$HOME/.cargo/bin:$PATH" -``` - -For **zsh** (`~/.zshrc`): -```bash -export PATH="$HOME/.cargo/bin:$PATH" -``` - -For **fish** (`~/.config/fish/config.fish`): -```fish -set -gx PATH $HOME/.cargo/bin $PATH -``` - -**3. 
Reload shell config:** -```bash -source ~/.bashrc # or ~/.zshrc or restart terminal -``` - -**4. Verify:** -```bash -which rtk -rtk --version -rtk gain -``` - ---- - -## Problem: Compilation errors during installation - -### Symptom -```bash -$ cargo install --path . -error: failed to compile rtk v0.7.1 -``` - -### Solutions - -**1. Update Rust toolchain:** -```bash -rustup update stable -rustup default stable -``` - -**2. Clean and rebuild:** -```bash -cargo clean -cargo build --release -cargo install --path . --force -``` - -**3. Check Rust version (minimum required):** -```bash -rustc --version # Should be 1.70+ for most features -``` - -**4. If still fails, report issue:** -- GitHub: https://github.com/rtk-ai/rtk/issues - ---- - -## Need More Help? - -**Report issues:** -- Fork-specific: https://github.com/rtk-ai/rtk/issues -- Upstream: https://github.com/rtk-ai/rtk/issues - -**Run the diagnostic script:** -```bash -# From the rtk repository root -bash scripts/check-installation.sh -``` - -This script will check: -- ✅ RTK installed and in PATH -- ✅ Correct version (Token Killer, not Type Kit) -- ✅ Available features (pnpm, vitest, next, etc.) -- ✅ Claude Code integration (CLAUDE.md files) -- ✅ Auto-rewrite hook status - -The script provides specific fix commands for any issues found. diff --git a/docs/architecture/decisions/proxy-architecture.md b/docs/architecture/decisions/proxy-architecture.md deleted file mode 100644 index 7c2ccd9ba..000000000 --- a/docs/architecture/decisions/proxy-architecture.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Proxy Architecture -description: ADR — why RTK uses a CLI proxy pattern instead of shell aliases or wrappers -sidebar: - order: 2 ---- - -# ADR: Proxy Architecture - -## Decision - -RTK is a CLI proxy: a single binary that intercepts commands, executes them as subprocesses, filters the output, and exits. Users invoke `rtk git status` instead of `git status`. 
- -Hooks extend this by making the interception transparent — the AI agent's command is rewritten before execution, so neither the agent nor the user types `rtk`. - -## Alternatives considered - -### Shell aliases - -```bash -alias git='rtk git' -alias cargo='rtk cargo' -``` - -**Rejected because:** -- Requires shell configuration per-user, per-machine, per-shell -- Doesn't work for non-interactive contexts (scripts, CI, AI agents) -- Can't be installed programmatically without modifying shell dotfiles -- Breaks if the user has other aliases or functions with the same name - -### Shell function wrappers - -Similar to aliases but more fragile. Same problems. - -### `LD_PRELOAD` / dynamic linking interception - -**Rejected because:** -- Platform-specific (Linux only with glibc) -- Security restrictions (macOS SIP, container environments) -- Complex to implement, maintain, and debug - -### Hook-only approach (no explicit `rtk` prefix) - -Make RTK entirely invisible — install hooks and never expose `rtk ` as a user-facing interface. - -**Rejected because:** -- Users need a way to invoke RTK explicitly for debugging (`rtk git status -vvv`) -- `rtk gain` and `rtk discover` need a namespace -- Transparent hooks are additive, not a replacement for the explicit interface - -## Why the proxy pattern works - -**Single binary, no configuration:** `rtk git status` works identically on macOS, Linux, and Windows. No dotfiles. No shell-specific setup. - -**Explicit and debuggable:** `-v`/`-vv`/`-vvv` flags expose what RTK is doing at each phase. `RTK_DISABLED=1` bypasses it for one command. - -**Exit code preservation:** RTK propagates the underlying tool's exit code. CI pipelines that check `$?` work correctly. - -**Fail-safe:** If RTK's filter fails, it falls back to raw output. The user always gets a result. 
- -**Hook interception as an enhancement:** The hook layer adds transparency on top of the proxy pattern — it rewrites `git status` to `rtk git status` before the agent sees it. But the proxy interface remains available for direct use, debugging, and tools that can't be hooked. - -## Consequences - -- Every supported command needs a module in `src/cmds/` -- Unsupported commands pass through transparently (no breakage) -- The binary grows as new commands are added (~5MB currently, right at the `<5MB` soft target after stripping) -- Adding a new command = adding a module + registering in `main.rs` + adding rewrite pattern in `discover/registry.rs` diff --git a/docs/architecture/decisions/why-no-async.md b/docs/architecture/decisions/why-no-async.md deleted file mode 100644 index 299c51a9f..000000000 --- a/docs/architecture/decisions/why-no-async.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Why No Async -description: ADR — why RTK is single-threaded and does not use tokio or async runtimes -sidebar: - order: 1 ---- - -# ADR: Why No Async - -## Decision - -RTK is single-threaded. No `tokio`, `async-std`, or `futures`. All I/O is blocking. - -## Context - -RTK is a CLI proxy that runs for milliseconds and exits. The typical invocation: - -1. Parse CLI arguments (~0.1ms) -2. Spawn one subprocess and capture its output (~2-5ms) -3. Filter the output (~0.1ms) -4. Print and exit - -There is no concurrent I/O, no network server, no parallel request handling. - -## Consequences - -**Why async would hurt:** - -- `tokio` adds 5-10ms to startup time from runtime initialization. RTK's target is `<10ms` total. Async would consume half — potentially all — of the budget before the first useful line of code. -- The entire value proposition of RTK is zero-overhead transparency. If developers perceive any delay, they disable it. -- One subprocess. One output stream. No concurrency needed. 
- -**Why blocking I/O is correct here:** - -- `std::process::Command::output()` captures stdout + stderr in one blocking call. This is exactly what RTK needs. -- No event loop required. No `.await` noise in filter code. -- Binary stays under 5MB. No runtime dependencies. - -## Tradeoffs - -**What we give up:** -- Hypothetical future parallelism (e.g., running multiple filters in parallel). Not needed today. -- Async ecosystem crates (reqwest, sqlx). RTK uses `rusqlite` (sync) and `ureq` (sync) instead. - -**What we gain:** -- `<10ms` startup, always. -- Simple, readable filter code with no `.await` punctuation. -- No runtime initialization path that can fail. - -## Rule - -If you add a dependency that pulls in `tokio` or any async runtime, the PR will be rejected. Check before adding: `cargo tree | grep tokio`. diff --git a/docs/architecture/diagrams/command-flow.md b/docs/architecture/diagrams/command-flow.md deleted file mode 100644 index 6d8f06267..000000000 --- a/docs/architecture/diagrams/command-flow.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Command Flow -description: End-to-end diagram of how a command flows through RTK from agent to LLM -sidebar: - order: 1 ---- - -# Command Flow - -End-to-end flow from the AI agent issuing a command to the filtered output reaching the LLM. 
- -## With hook (transparent rewrite) - -```mermaid -flowchart TD - A["AI Agent\n(Claude Code, Cursor, etc.)"] -->|"runs: cargo test"| B - - subgraph HOOK ["Hook Interception (PreToolUse)"] - B["Hook reads JSON input\nextract command string"] --> C - C["rtk rewrite 'cargo test'"] --> D - D{"Registry match?"} - D -->|"yes"| E["returns 'rtk cargo test'"] - D -->|"no match"| F["returns original unchanged"] - end - - E --> G - F --> G - - subgraph RTK ["RTK Binary"] - G["Phase 1: Parse\nClap → Commands::Cargo"] --> H - H["Phase 2: Route\ncargo::run(args)"] --> I - I["Phase 3: Execute\nstd::process::Command::new('cargo')\n.args(['test'])"] --> J - J["Phase 4: Filter\nfailures only\n200 lines → 5 lines"] --> K - K["Phase 5: Print\nprintln!(filtered)"] --> L - L["Phase 6: Track\nSQLite INSERT\n(input=5000tok, output=50tok)"] - end - - K -->|"filtered output"| M["LLM Context\n~90% fewer tokens"] -``` - -## Without hook (direct usage) - -```mermaid -flowchart LR - A["Developer\ntype: rtk git status"] --> B["RTK Binary"] - B --> C["git status (subprocess)"] - C -->|"20 lines raw"| B - B -->|"5 lines filtered"| D["Terminal\n(or LLM context)"] -``` - -## Filter lookup (TOML path) - -```mermaid -flowchart LR - CMD["rtk my-tool args"] --> P1 - P1{"1. .rtk/filters.toml\n(project-local)"} - P1 -->|"match"| WIN["apply filter → print"] - P1 -->|"no match"| P2 - P2{"2. ~/.config/rtk/filters.toml\n(user-global)"} - P2 -->|"match"| WIN - P2 -->|"no match"| P3 - P3{"3. 
BUILTIN_TOML\n(binary)"} - P3 -->|"match"| WIN - P3 -->|"no match"| P4[["exec raw\n(passthrough)"]] -``` diff --git a/docs/architecture/diagrams/filter-pipeline.md b/docs/architecture/diagrams/filter-pipeline.md deleted file mode 100644 index dda20e9d4..000000000 --- a/docs/architecture/diagrams/filter-pipeline.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Filter Pipeline -description: How a TOML filter goes from file to execution — build pipeline and runtime stages -sidebar: - order: 2 ---- - -# Filter Pipeline - -## Build pipeline - -```mermaid -flowchart TD - A[["src/filters/my-tool.toml\n(new file)"]] --> B - - subgraph BUILD ["cargo build"] - B["build.rs\n1. ls src/filters/*.toml\n2. sort alphabetically\n3. concat → BUILTIN_TOML"] --> C - C{"TOML valid?\nDuplicate names?"} -->|"❌ panic"| D[["Build fails\nerror points to bad file"]] - C -->|"✅ ok"| E[["OUT_DIR/builtin_filters.toml\n(generated)"]] - E --> F["rustc embeds via include_str!"] - F --> G[["rtk binary\nBUILTIN_TOML embedded"]] - end - - subgraph TESTS ["cargo test"] - H["test_builtin_filter_count\nassert_eq!(filters.len(), N)"] -->|"❌ wrong count"| I[["FAIL"]] - J["test_builtin_all_filters_present\nassert!(names.contains('my-tool'))"] -->|"❌ name missing"| K[["FAIL"]] - L["test_builtin_all_filters_have_inline_tests\nassert!(tested.contains(name))"] -->|"❌ no tests"| M[["FAIL"]] - end - - subgraph VERIFY ["rtk verify"] - N["runs [[tests.my-tool]]\ninput → filter → compare expected"] - N -->|"❌ mismatch"| O[["FAIL\nshows actual vs expected"]] - N -->|"✅ pass"| P[["All tests passed"]] - end - - G --> H & J & L & N -``` - -## Runtime stages - -```mermaid -flowchart TD - CMD["rtk my-tool args"] --> LOOKUP - - subgraph LOOKUP ["Filter Lookup"] - L1{".rtk/filters.toml"} -->|"match"| APPLY - L1 -->|"no match"| L2 - L2{"~/.config/rtk/filters.toml"} -->|"match"| APPLY - L2 -->|"no match"| L3 - L3{"BUILTIN_TOML"} -->|"match"| APPLY - L3 -->|"no match"| RAW[["exec raw (passthrough)"]] - end - - APPLY --> 
EXEC["exec command\ncapture stdout"] - EXEC --> PIPE - - subgraph PIPE ["8-stage filter pipeline"] - S1["1. strip_ansi"] --> S2 - S2["2. replace"] --> S3 - S3{"3. match_output\nshort-circuit?"} - S3 -->|"✅ match"| MSG[["emit on_match\nstop"]] - S3 -->|"no match"| S4 - S4["4. strip/keep_lines"] --> S5 - S5["5. truncate_lines_at"] --> S6 - S6["6. tail_lines"] --> S7 - S7["7. max_lines"] --> S8 - S8{"8. output empty?"} - S8 -->|"yes"| EMPTY[["emit on_empty"]] - S8 -->|"no"| OUT[["print filtered output\n+ exit code"]] - end -``` diff --git a/docs/architecture/index.md b/docs/architecture/index.md deleted file mode 100644 index b64e5a332..000000000 --- a/docs/architecture/index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Architecture -description: System design, diagrams, and architectural decision records for RTK -sidebar: - order: 1 ---- - -# Architecture - -Conceptual documentation covering how RTK is designed and why key decisions were made. - -## What's in this section - -- **Diagrams** — Visual representations of the command flow and filter pipeline -- **Decisions** — Architectural decision records (ADRs) for key design choices diff --git a/docs/filter-workflow.md b/docs/filter-workflow.md deleted file mode 100644 index 0b0d32c1b..000000000 --- a/docs/filter-workflow.md +++ /dev/null @@ -1,102 +0,0 @@ -# How a TOML filter goes from file to execution - -This document explains what happens between "I created `src/filters/my-tool.toml`" and "RTK filters the output of `my-tool`". - -## Build pipeline - -```mermaid -flowchart TD - A[["📄 src/filters/my-tool.toml\n(new file)"]] --> B - - subgraph BUILD ["🔨 cargo build"] - B["build.rs\n① ls src/filters/*.toml\n② sort alphabetically\n③ concat → schema_version = 1 + all files"] --> C - C{"TOML valid?\nDuplicate names?"} -->|"❌ panic! 
(build fails)"| D[["🛑 Error message\npoints to bad file"]] - C -->|"✅ ok"| E[["OUT_DIR/builtin_filters.toml\n(generated file)"]] - E --> F["rustc\ninclude_str!(concat!(env!(OUT_DIR),\n'/builtin_filters.toml'))"] - F --> G[["🦀 rtk binary\nBUILTIN_TOML embedded"]] - end - - subgraph TESTS ["🧪 cargo test"] - H["test_builtin_filter_count\nassert_eq!(filters.len(), N)"] -->|"❌ count wrong"| I[["FAIL\n'Expected N, got N+1'\nUpdate the count"]] - J["test_builtin_all_expected_\nfilters_present\nassert!(names.contains('my-tool'))"] -->|"❌ name missing"| K[["FAIL\n'my-tool is missing—\nwas its .toml deleted?'"]] - L["test_builtin_all_filters_\nhave_inline_tests\nassert!(tested.contains(name))"] -->|"❌ no tests"| M[["FAIL\n'Add tests.my-tool\nentries'"]] - end - - subgraph VERIFY ["✅ rtk verify"] - N["runs [[tests.my-tool]]\ninput → filter → compare expected"] - N -->|"❌ mismatch"| O[["FAIL\nshows actual vs expected"]] - N -->|"✅ pass"| P[["60/60 tests passed"]] - end - - G --> H - G --> J - G --> L - G --> N - - subgraph RUNTIME ["⚡ rtk my-tool --verbose"] - Q["Claude Code hook\nmy-tool ... 
→ rtk my-tool ..."] --> R - R["TomlFilterRegistry::load()\n① .rtk/filters.toml (project)\n② ~/.config/rtk/filters.toml (user)\n③ BUILTIN_TOML (binary)\n④ passthrough"] --> S - S{"match_command\n'^my-tool\\b'\nmatches?"} -->|"No match"| T[["exec raw\n(passthrough)"]] - S -->|"✅ match"| U["exec command\ncapture stdout"] - U --> V - - subgraph PIPELINE ["8-stage filter pipeline"] - V["strip_ansi"] --> W["replace"] - W --> X{"match_output\nshort-circuit?"} - X -->|"✅ pattern matched"| Y[["emit message\nstop pipeline"]] - X -->|"no match"| Z["strip/keep_lines"] - Z --> AA["truncate_lines_at"] - AA --> AB["tail_lines"] - AB --> AC["max_lines"] - AC --> AD{"output\nempty?"} - AD -->|"yes"| AE[["emit on_empty"]] - AD -->|"no"| AF[["print filtered\noutput + exit code"]] - end - end - - G --> Q - - style BUILD fill:#1e3a5f,color:#fff - style TESTS fill:#1a3a1a,color:#fff - style VERIFY fill:#2d1b69,color:#fff - style RUNTIME fill:#3a1a1a,color:#fff - style PIPELINE fill:#4a2a00,color:#fff - style D fill:#8b0000,color:#fff - style I fill:#8b0000,color:#fff - style K fill:#8b0000,color:#fff - style M fill:#8b0000,color:#fff - style O fill:#8b0000,color:#fff -``` - -## Step-by-step summary - -| Step | Who | What happens | Fails if | -|------|-----|--------------|----------| -| 1 | Contributor | Creates `src/filters/my-tool.toml` | — | -| 2 | `build.rs` | Concatenates all `.toml` files alphabetically | TOML syntax error, duplicate filter name | -| 3 | `rustc` | Embeds result in binary via `BUILTIN_TOML` const | — | -| 4 | `cargo test` | 3 guards check count, names, inline test presence | Count not updated, name not in list, no `[[tests.*]]` | -| 5 | `rtk verify` | Runs each `[[tests.my-tool]]` entry | Filter logic doesn't match expected output | -| 6 | Runtime | Hook rewrites command, registry looks up filter, pipeline runs | No match → passthrough (not an error) | - -## Filter lookup priority at runtime - -```mermaid -flowchart LR - CMD["rtk my-tool args"] --> P1 - P1{"1. 
.rtk/filters.toml\n(project-local)"} - P1 -->|"✅ match"| WIN["apply filter"] - P1 -->|"no match"| P2 - P2{"2. ~/.config/rtk/filters.toml\n(user-global)\n(macOS alt: ~/Library/Application Support/rtk/filters.toml)"} - P2 -->|"✅ match"| WIN - P2 -->|"no match"| P3 - P3{"3. BUILTIN_TOML\n(binary)"} - P3 -->|"✅ match"| WIN - P3 -->|"no match"| P4[["exec raw\n(passthrough)"]] -``` - -First match wins. A project filter with the same name as a built-in shadows the built-in and triggers a warning: - -``` -[rtk] warning: filter 'make' is shadowing a built-in filter -``` diff --git a/docs/guide/analytics/discover.md b/docs/guide/analytics/discover.md deleted file mode 100644 index bcbd248ec..000000000 --- a/docs/guide/analytics/discover.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Discover Missed Savings -description: Find commands that ran without RTK and could have saved tokens -sidebar: - order: 2 ---- - -# Discover Missed Savings - -`rtk discover` analyzes Claude Code's command history to find commands that ran without RTK and could have been optimized. - -## Usage - -```bash -rtk discover # current project, last 30 days -rtk discover --all --since 7 # all projects, last 7 days -rtk discover -p /path/to/project # filter by project path -rtk discover --limit 20 # max commands per section -rtk discover --format json # JSON export -``` - -## Options - -| Option | Short | Description | -|--------|-------|-------------| -| `--project` | `-p` | Filter by project path | -| `--limit` | `-l` | Max commands per section (default: 15) | -| `--all` | `-a` | Scan all projects | -| `--since` | `-s` | Last N days (default: 30) | -| `--format` | `-f` | Output format: `text`, `json` | - -## Example output - -``` -RTK Missed Opportunities (last 30 days) - -Commands that could have used RTK: - git log --oneline -20 ×12 (est. 80% savings each) - cargo test ×8 (est. 90% savings each) - pnpm list ×5 (est. 
70% savings each) - -Estimated savings missed: ~340K tokens -``` - -## How it works - -RTK reads Claude Code's command history database (the same one that backs `rtk gain --history`). It matches raw commands against the RTK rewrite registry and flags instances where RTK was not used but a filter exists. - -Use this after setting up RTK to see how much you were leaving on the table before. diff --git a/docs/guide/analytics/economics.md b/docs/guide/analytics/economics.md deleted file mode 100644 index 3287d6915..000000000 --- a/docs/guide/analytics/economics.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Claude Code Economics -description: Compare your Claude Code spending against RTK token savings -sidebar: - order: 3 ---- - -# Claude Code Economics - -`rtk cc-economics` compares your Claude Code API spending (via ccusage) against the token savings RTK has generated. It answers: "what is RTK actually saving me in dollars?" - -## Requirements - -Requires [ccusage](https://github.com/ryoppippi/ccusage) to be installed and have data. ccusage tracks Claude Code API costs from your usage history. - -## Usage - -```bash -rtk cc-economics # summary -rtk cc-economics --daily # day-by-day breakdown -rtk cc-economics --weekly # week-by-week -rtk cc-economics --monthly # month-by-month -rtk cc-economics --all # all breakdowns at once -rtk cc-economics --format json -``` - -## Example output - -``` -Claude Code Economics -════════════════════════════════════════ -Total API cost $12.40 (30 days) -RTK tokens saved 1.2M (30 days) -Estimated savings $3.20 (26% of bill) - -At current savings rate: - Monthly reduction: ~$3.20/mo - Annual reduction: ~$38/yr -``` - -## How savings are estimated - -RTK estimates dollar savings by applying Claude's input token pricing to the tokens it prevented from reaching the LLM: - -``` -Saved tokens × (input price per token) = estimated dollar savings -``` - -This is an estimate — actual savings depend on which model was used for each request. 
- -## See also - -- [Token Savings Analytics](./gain.md) — the `rtk gain` command for raw token counts -- [Discover Missed Savings](./discover.md) — find commands that ran without RTK diff --git a/docs/guide/commands/cargo.md b/docs/guide/commands/cargo.md deleted file mode 100644 index dc2df8f77..000000000 --- a/docs/guide/commands/cargo.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: Cargo (Rust) -description: RTK filters for cargo — build, test, check, clippy, and nextest -sidebar: - order: 2 ---- - -# Cargo (Rust) - -RTK filters the most verbose cargo outputs and passes through unrecognized subcommands unchanged. - -## cargo test — 90% savings - -```bash -rtk cargo test [args...] -``` - -Shows failures only. On success, shows a compact summary. - -**Before (200+ lines on failure):** -``` -running 15 tests -test utils::test_parse ... ok -test utils::test_format ... ok -test utils::test_edge_case ... FAILED - -failures: - ----- utils::test_edge_case stdout ---- -thread 'utils::test_edge_case' panicked at 'assertion failed: ...' -...150 lines of backtrace... -``` - -**After (~5 lines):** -``` -FAILED: 2/15 tests - test_edge_case: assertion failed at utils.rs:42 - test_overflow: panic at utils.rs:18 -[full output: ~/.local/share/rtk/tee/cargo_test_1234.log] -``` - -The tee file path lets you (or your AI assistant) read the full output if needed without re-running the command. - -## cargo nextest — failures only - -```bash -rtk cargo nextest [run|list|--lib] [args...] -``` - -Same behavior as `cargo test` — filters to failures only. - -## cargo build — 80% savings - -```bash -rtk cargo build [args...] -``` - -Removes all "Compiling..." lines, keeps errors and the final result. - -**Before:** -``` - Compiling proc-macro2 v1.0.79 - Compiling unicode-ident v1.0.12 - Compiling quote v1.0.35 - ...200 crate lines... 
- Compiling rtk v0.28.0 - Finished release [optimized] target(s) in 12.34s -``` - -**After:** -``` -Finished release [optimized] in 12.34s -``` - -## cargo check — 80% savings - -```bash -rtk cargo check [args...] -``` - -Removes "Checking..." lines, keeps errors. - -## cargo clippy — 80% savings - -```bash -rtk cargo clippy [args...] -``` - -Groups warnings by lint rule. - -**Before (50 lines for 3 warnings):** -``` -warning: unused variable `x` - --> src/main.rs:42:9 - | -42 | let x = 5; - | ^ help: if this is intentional, prefix it with an underscore: `_x` - = note: `#[warn(unused_variables)]` on by default -... -``` - -**After:** -``` -src/main.rs — 2 warnings - unused_variables (x2): src/main.rs:42, src/main.rs:67 -``` - -## cargo install - -```bash -rtk cargo install [args...] -``` - -Removes dependency compilation noise, keeps the install result and any errors. - -## Generic test wrapper - -```bash -rtk test -``` - -Runs any test command and shows failures only. Works with any test runner: - -```bash -rtk test cargo test -rtk test npm test -rtk test bun test -rtk test pytest -``` - -## Error-only wrapper - -```bash -rtk err -``` - -Runs any command and shows errors and warnings only: - -```bash -rtk err cargo build -rtk err npm run build -``` diff --git a/docs/guide/commands/containers.md b/docs/guide/commands/containers.md deleted file mode 100644 index c5d6a52c1..000000000 --- a/docs/guide/commands/containers.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Containers -description: RTK filters for Docker and Kubernetes -sidebar: - order: 9 ---- - -# Containers - -RTK compresses Docker and Kubernetes command output into compact, token-efficient summaries. - -## Docker - -### docker ps — 80% savings - -```bash -rtk docker ps [args...] 
-``` - -**Before:** -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -abc123def456 nginx:1.25 "/docker-entrypoint.…" 2 days ago Up 2 days (healthy) 0.0.0.0:80->80/tcp web -789012345678 postgres:16 "docker-entrypoint.s…" 2 days ago Up 2 days (healthy) 0.0.0.0:5432->5432/tcp db -``` - -**After:** -``` -web nginx:1.25 Up 2d (healthy) -db postgres:16 Up 2d (healthy) -``` - -### docker images — 80% savings - -```bash -rtk docker images [args...] -``` - -Compact list: name, tag, size, age. - -### docker logs — 70% savings - -```bash -rtk docker logs [args...] -``` - -Deduplicates repeated log lines: `[ERROR] Connection refused (×42)`. - -### Docker Compose - -```bash -rtk docker compose ps # compact service list — 80% -rtk docker compose logs [service] # deduplicated logs — 70% -rtk docker compose build [service] # build summary — 60% -``` - -Unrecognized `docker compose` subcommands pass through. - -## Kubernetes - -### kubectl pods — 75%+ savings - -```bash -rtk kubectl pods [-n namespace] [-A] -``` - -Compact pod list with status. - -### kubectl services - -```bash -rtk kubectl services [-n namespace] [-A] -``` - -### kubectl logs — 70% savings - -```bash -rtk kubectl logs [-c container] -``` - -Deduplicates repeated log lines. - -### Passthrough - -All other `kubectl` subcommands pass through to kubectl: - -```bash -rtk kubectl apply -f deployment.yaml # passes through -rtk kubectl describe pod # passes through -``` diff --git a/docs/guide/commands/data.md b/docs/guide/commands/data.md deleted file mode 100644 index 239e1aa2e..000000000 --- a/docs/guide/commands/data.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Data & Network -description: RTK filters for JSON, environment variables, logs, curl, and wget -sidebar: - order: 11 ---- - -# Data & Network - -RTK compresses structured data, network output, and log streams. 
- -## json — 60% savings - -```bash -rtk json [--depth N] # default: depth 5 -rtk json - # from stdin -``` - -Shows JSON structure (keys and types) without values. Useful for exploring large API responses or config files. - -**Before — `cat package.json` (50 lines):** -```json -{ - "name": "my-app", - "version": "1.0.0", - "dependencies": { - "react": "^18.2.0", - "next": "^14.0.0", - ...15 dependencies... - } -} -``` - -**After — `rtk json package.json` (10 lines):** -``` -{ - name: string - version: string - dependencies: { 15 keys } - devDependencies: { 8 keys } - scripts: { 6 keys } -} -``` - -## env — sensitive values masked - -```bash -rtk env # all variables (sensitive values masked) -rtk env -f AWS # filter by name -rtk env --show-all # include sensitive values -``` - -Sensitive variables (tokens, secrets, passwords) are masked by default: `AWS_SECRET_ACCESS_KEY=***`. - -## log — 60-80% savings - -```bash -rtk log # from a file -rtk log # from stdin (pipe) -``` - -Deduplicates repeated log lines: `[ERROR] Connection refused (×42)`. Savings depend on how repetitive the log is. - -## curl — HTTP with JSON detection - -```bash -rtk curl [args...] -``` - -Auto-detects JSON responses and shows schema instead of full content. Falls back to raw output for non-JSON. - -## wget - -```bash -rtk wget [args...] -rtk wget -O - # output to stdout -``` - -Removes progress bars and download noise. - -## aws - -```bash -rtk aws [args...] -``` - -Forces JSON output mode and compresses the result. Supports all AWS services (sts, s3, ec2, ecs, rds, cloudformation, etc.). - -## psql - -```bash -rtk psql [args...] -``` - -Removes table borders and compresses query output. - -## summary - -```bash -rtk summary -``` - -Runs any command and generates a heuristic summary of the output. Useful for commands that don't have a dedicated RTK filter. 
diff --git a/docs/guide/commands/dotnet.md b/docs/guide/commands/dotnet.md deleted file mode 100644 index 08d93d05e..000000000 --- a/docs/guide/commands/dotnet.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: .NET -description: RTK filters for dotnet build, test, and MSBuild logs -sidebar: - order: 8 ---- - -# .NET - -RTK covers .NET build, test, and diagnostic outputs. - -## dotnet build — 70-80% savings - -```bash -rtk dotnet build [args...] -``` - -Removes per-project compilation lines, keeps errors and build summary. - -**Before:** -``` -Build started... -Microsoft (R) Build Engine version 17.x - Restore complete (1.2s) - MyLib -> bin/Debug/net8.0/MyLib.dll - MyApp -> bin/Debug/net8.0/MyApp.dll - -Build succeeded. - 0 Warning(s) - 0 Error(s) -Time Elapsed 00:00:04.23 -``` - -**After:** -``` -Build succeeded. 0 warnings, 0 errors (4.23s) -``` - -## dotnet test — 85% savings - -```bash -rtk dotnet test [args...] -``` - -Shows failures only. On success, compact summary. - -## MSBuild binary logs - -```bash -rtk dotnet binlog [path/to/file.binlog] -``` - -Parses `.binlog` binary log files and displays a compact error/warning summary. - -## dotnet format - -```bash -rtk dotnet format [args...] -``` - -Shows only files that were reformatted or have formatting issues. - -## Passthrough - -Other `dotnet` subcommands pass through unchanged: - -```bash -rtk dotnet run # passes through -rtk dotnet publish # passes through -rtk dotnet ef migrate # passes through -``` diff --git a/docs/guide/commands/files.md b/docs/guide/commands/files.md deleted file mode 100644 index 53d652398..000000000 --- a/docs/guide/commands/files.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: File System Commands -description: RTK filters for ls, read, grep, find, diff, and wc -sidebar: - order: 3 ---- - -# File System Commands - -RTK replaces common file and search commands with compact, token-optimized equivalents. - -## ls — 80% savings - -```bash -rtk ls [args...] 
-``` - -Replaces `ls` and `tree` with a compact directory tree. All native `ls` flags are supported (`-l`, `-a`, `-h`, `-R`, etc.). - -**Before (45 lines):** -``` -drwxr-xr-x 15 user staff 480 ... --rw-r--r-- 1 user staff 1234 ... -...40 more lines... -``` - -**After (12 lines):** -``` -my-project/ -+-- src/ (8 files) -| +-- main.rs -+-- Cargo.toml -+-- README.md -``` - -## read — up to 74% savings - -```bash -rtk read [options] -rtk read - # read from stdin -``` - -Replaces `cat`, `head`, `tail` with intelligent content filtering. - -| Option | Short | Default | Description | -|--------|-------|---------|-------------| -| `--level` | `-l` | `minimal` | Filtering level: `none`, `minimal`, `aggressive` | -| `--max-lines` | `-m` | unlimited | Maximum number of lines | -| `--line-numbers` | `-n` | off | Show line numbers | - -**Filtering levels:** - -| Level | Description | Savings | -|-------|-------------|---------| -| `none` | Raw output, no filtering | 0% | -| `minimal` | Removes excessive blank lines and comments | ~30% | -| `aggressive` | Signatures only — removes function bodies | ~74% | - -**Before — `cat main.rs` (~200 lines):** -```rust -fn main() -> Result<()> { - let config = Config::load()?; - let data = process_data(&input); - for item in data { - println!("{}", item); - } - Ok(()) -} -... -``` - -**After — `rtk read main.rs -l aggressive` (~50 lines):** -```rust -fn main() -> Result<()> { ... } -fn process_data(input: &str) -> Vec { ... } -struct Config { ... } -impl Config { fn load() -> Result { ... } } -``` - -Supported languages for filtering: Rust, Python, JavaScript, TypeScript, Go, C, C++, Java, Ruby, Shell. - -## grep — 80% savings - -```bash -rtk grep [path] [options] -``` - -Replaces `grep` and `rg` with results grouped by file and truncated. 
- -| Option | Short | Default | Description | -|--------|-------|---------|-------------| -| `--max-len` | `-l` | 80 | Maximum line length | -| `--max` | `-m` | 50 | Maximum number of results | -| `--context-only` | `-c` | off | Show only match context | -| `--file-type` | `-t` | all | Filter by type (ts, py, rust, etc.) | - -Additional arguments are passed to `rg` (ripgrep). - -**Before (20 lines):** -``` -src/git.rs:45:pub fn run(...) -src/git.rs:120:fn run_status(...) -src/ls.rs:12:pub fn run(...) -src/ls.rs:25:fn run_tree(...) -``` - -**After (10 lines):** -``` -src/git.rs - 45: pub fn run(...) - 120: fn run_status(...) -src/ls.rs - 12: pub fn run(...) - 25: fn run_tree(...) -``` - -## find — 80% savings - -```bash -rtk find [args...] -``` - -Replaces `find` and `fd` with results grouped by directory. Both RTK syntax and native `find` flags (`-name`, `-type`, etc.) are supported. - -**Before (30 lines):** -``` -./src/main.rs -./src/git.rs -./src/config.rs -./src/tracking.rs -./src/filter.rs -./src/utils.rs -...24 more lines... -``` - -**After (8 lines):** -``` -src/ (12 .rs) - main.rs, git.rs, config.rs - tracking.rs, filter.rs, utils.rs - ...6 more -tests/ (3 .rs) - test_git.rs, test_ls.rs, test_filter.rs -``` - -## tree - -```bash -rtk tree [args...] -``` - -Proxy to native `tree` with filtered output. All native flags supported (`-L`, `-d`, `-a`, etc.). - -**Savings:** ~80% - -## diff — 60% savings - -```bash -rtk diff -rtk diff # stdin as second file -``` - -Ultra-compact diff showing only changed lines. - -## wc - -```bash -rtk wc [args...] -``` - -Replaces `wc` with compact output (removes paths and padding). All native flags supported (`-l`, `-w`, `-c`, etc.). - -## smart — 95% savings - -```bash -rtk smart -``` - -Generates a 2-line technical summary of a source file using heuristics. - -```bash -$ rtk smart src/tracking.rs -SQLite-based token tracking system for command executions. 
-Records input/output tokens, savings %, execution times with 90-day retention. -``` diff --git a/docs/guide/commands/git.md b/docs/guide/commands/git.md deleted file mode 100644 index dc1b37a1d..000000000 --- a/docs/guide/commands/git.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -title: Git Commands -description: RTK filters for git and GitHub CLI — compact status, log, diff, and more -sidebar: - order: 1 ---- - -# Git Commands - -RTK supports all git subcommands. Unrecognized subcommands pass through to git unchanged. - -## git status — 80% savings - -```bash -rtk git status [args...] -``` - -Replaces the verbose multi-section output with a compact one-line summary. - -**Before (20 lines):** -``` -On branch main -Your branch is up to date with 'origin/main'. - -Changes not staged for commit: - (use "git add ..." to update what will be committed) - modified: src/main.rs - modified: src/git.rs - -Untracked files: - (use "git add ..." to include in what will be committed) - new_file.txt -... -``` - -**After (5 lines):** -``` -main | 3M 1? 1A -M src/main.rs -M src/git.rs -? new_file.txt -A staged_file.rs -``` - -## git log — 80% savings - -```bash -rtk git log [args...] # supports --oneline, --graph, --all, -n, etc. -``` - -Shows hash + subject only, one line per commit. - -**Before (50+ lines for 5 commits):** -``` -commit abc123def... (HEAD -> main) -Author: User -Date: Mon Jan 15 10:30:00 2024 - - Fix token counting bug - -commit def456... -... -``` - -**After (5 lines):** -``` -abc123 Fix token counting bug -def456 Add vitest support -789abc Refactor filter engine -012def Update README -345ghi Initial commit -``` - -## git diff — 75% savings - -```bash -rtk git diff [args...] # supports --stat, --cached, --staged, etc. -``` - -Shows changed files with line counts and condensed hunks. 
- -**Before (~100 lines):** -``` -diff --git a/src/main.rs b/src/main.rs -index abc123..def456 100644 ---- a/src/main.rs -+++ b/src/main.rs -@@ -10,6 +10,8 @@ - fn main() { -+ let config = Config::load()?; -+ config.validate()?; -...30 lines of headers and context... -``` - -**After (~25 lines):** -``` -src/main.rs (+5/-2) - + let config = Config::load()?; - + config.validate()?; - - // old code - - let x = 42; -src/git.rs (+1/-1) - ~ format!("ok {}", branch) -``` - -## git show — 80% savings - -```bash -rtk git show [args...] -``` - -Shows commit summary + stat + compact diff. - -## git add — 92% savings - -```bash -rtk git add [args...] # supports -A, -p, --all, etc. -``` - -Output: `ok` (single word). - -## git commit — 92% savings - -```bash -rtk git commit -m "message" [args...] # supports -a, --amend, --allow-empty, etc. -``` - -Output: `ok abc1234` (confirmation + short hash). - -## git push — 92% savings - -```bash -rtk git push [args...] # supports -u, remote, branch, etc. -``` - -**Before (15 lines):** -``` -Enumerating objects: 5, done. -Counting objects: 100% (5/5), done. -Delta compression using up to 8 threads -... -``` - -**After (1 line):** -``` -ok main -``` - -## git pull — 92% savings - -```bash -rtk git pull [args...] -``` - -Output: `ok 3 files +10 -2` - -## git branch - -```bash -rtk git branch [args...] # supports -d, -D, -m, etc. -``` - -Shows current branch, local branches, and remote branches in compact form. - -## git fetch - -```bash -rtk git fetch [args...] -``` - -Output: `ok fetched (N new refs)` - -## git stash - -```bash -rtk git stash [list|show|pop|apply|drop|push] [args...] -``` - -## git worktree - -```bash -rtk git worktree [add|remove|prune|list] [args...] 
-``` - -## Passthrough - -Any git subcommand without a specific RTK filter runs unchanged: - -```bash -rtk git rebase main # runs git rebase main -rtk git cherry-pick abc # runs git cherry-pick abc -rtk git tag v1.0.0 # runs git tag v1.0.0 -``` - -## GitHub CLI - -```bash -rtk gh pr list -rtk gh pr view # 87% savings -rtk gh pr checks # 79% savings -rtk gh issue list -rtk gh run list # 82% savings -rtk gh api -``` - -**Before (30 lines for pr list):** -``` -Showing 10 of 15 pull requests in org/repo - -#42 feat: add vitest support - user opened about 2 days ago - labels: enhancement -... -``` - -**After (10 lines):** -``` -#42 feat: add vitest (open, 2d) -#41 fix: git diff crash (open, 3d) -#40 chore: update deps (merged, 5d) -``` diff --git a/docs/guide/commands/github-cli.md b/docs/guide/commands/github-cli.md deleted file mode 100644 index 5367942f8..000000000 --- a/docs/guide/commands/github-cli.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: GitHub CLI -description: RTK filters for gh — pull requests, issues, checks, and workflow runs -sidebar: - order: 10 ---- - -# GitHub CLI - -RTK filters `gh` output by removing ASCII art, verbose metadata, and decorative formatting, keeping the information that matters. - -## pr list — 80% savings - -```bash -rtk gh pr list [args...] -``` - -**Before (~30 lines):** -``` -Showing 3 of 3 pull requests in org/repo - -#42 feat: add vitest support - user1 opened about 2 days ago - Labels: enhancement - Review: 0 approving, 0 requesting changes - -#41 fix: git diff crash - user2 opened about 3 days ago - ... -``` - -**After (~6 lines):** -``` -#42 feat: add vitest (open, 2d) -#41 fix: git diff crash (open, 3d) -#40 chore: update deps (merged, 5d) -``` - -## pr view — 87% savings - -```bash -rtk gh pr view [args...] -``` - -Compact PR summary: title, status, author, description excerpt, and CI checks in one block. - -## pr checks — 79% savings - -```bash -rtk gh pr checks [args...] 
-``` - -Shows check name + status only, strips URLs and timestamps. - -## issue list — 80% savings - -```bash -rtk gh issue list [args...] -``` - -Same compact format as pr list. - -## run list — 82% savings - -```bash -rtk gh run list [args...] -``` - -Workflow run name + status + duration, one line each. - -## api - -```bash -rtk gh api [args...] -``` - -~26% savings — strips HTTP headers and formats JSON output. - -## Stacked PRs (Graphite) - -```bash -rtk gt log # compact stack log -rtk gt submit # compact submit output -rtk gt sync # compact sync -rtk gt restack # compact restack -rtk gt create # compact create -rtk gt branch # compact branch info -``` - -Unrecognized `gt` subcommands pass through to Graphite or git. - -## Passthrough - -Other `gh` subcommands pass through to the GitHub CLI unchanged. diff --git a/docs/guide/commands/go.md b/docs/guide/commands/go.md deleted file mode 100644 index e0b9d8ce5..000000000 --- a/docs/guide/commands/go.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Go -description: RTK filters for go test and golangci-lint -sidebar: - order: 6 ---- - -# Go - -RTK covers Go's two most verbose outputs: test runs and the linter. - -## go test — 90% savings - -```bash -rtk go test [args...] -``` - -Parses Go's NDJSON streaming output for precise failure filtering. Shows failures only on failure, compact summary on success. - -Uses `go test -json` internally for reliable event-by-event parsing. - -## golangci-lint — 85% savings - -```bash -rtk golangci-lint run [args...] -``` - -Compressed JSON output grouped by linter and file. - -**Before (verbose lint output):** -``` -src/main.go:42:5: Error return value of `fmt.Fprintf` is not checked (errcheck) -src/main.go:67:1: exported function `ProcessData` should have comment (godot) -src/handler.go:15:9: G104: Errors unhandled. (gosec) -... 
-``` - -**After:** -``` -src/main.go (2 issues) - errcheck: Error return not checked (x1) - godot: Missing comment on exported func (x1) -src/handler.go (1 issue) - gosec/G104: Errors unhandled (x1) -``` - -## go build - -Unrecognized `go` subcommands pass through directly: - -```bash -rtk go build ./... # passes through to go build -rtk go vet ./... # passes through to go vet -``` diff --git a/docs/guide/commands/javascript.md b/docs/guide/commands/javascript.md deleted file mode 100644 index 6b27cdc43..000000000 --- a/docs/guide/commands/javascript.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: JavaScript / TypeScript -description: RTK filters for pnpm, npm, vitest, tsc, ESLint, Next.js, Playwright, and Prisma -sidebar: - order: 4 ---- - -# JavaScript / TypeScript - -RTK covers the full JS/TS toolchain: package managers, test runners, type checking, linting, build tools, and ORM. - -## vitest run — 99.5% savings - -```bash -rtk vitest run [args...] -``` - -Shows failures only. One of the highest savings rates in RTK — vitest verbose output can be massive. - -## Playwright — 94% savings - -```bash -rtk playwright [args...] -``` - -Shows failures and summaries only, strips progress output. - -## tsc — 83% savings - -```bash -rtk tsc [args...] -``` - -Groups TypeScript errors by file and error code. - -**Before (50 lines for 4 errors):** -``` -src/api.ts(12,5): error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'. -src/api.ts(15,10): error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'. -src/api.ts(20,3): error TS7006: Parameter 'x' implicitly has an 'any' type. -src/utils.ts(5,1): error TS2304: Cannot find name 'foo'. -``` - -**After (15 lines):** -``` -src/api.ts (3 errors) - TS2345: Argument type mismatch (x2) - TS7006: Parameter implicitly has 'any' -src/utils.ts (1 error) - TS2304: Cannot find name 'foo' -``` - -## ESLint / Biome — 84% savings - -```bash -rtk lint [args...] 
-rtk lint biome [args...] -``` - -Groups violations by rule and file. Auto-detects the linter. - -## prettier - -```bash -rtk prettier --check . # 70% savings -rtk prettier --write src/ -``` - -Shows only files that need formatting. - -## Next.js build — 87% savings - -```bash -rtk next [args...] -rtk pnpm build # delegates to Next.js filter -``` - -Compact output with route metrics. - -## pnpm - -| Command | Description | Savings | -|---------|-------------|---------| -| `rtk pnpm list [-d N]` | Compact dependency tree | ~70% | -| `rtk pnpm outdated` | Outdated packages: `pkg: old -> new` | ~80% | -| `rtk pnpm install [pkgs...]` | Filters progress bars | ~60% | -| `rtk pnpm build` | Delegates to Next.js filter | ~87% | -| `rtk pnpm typecheck` | Delegates to tsc filter | ~83% | - -Unrecognized subcommands pass through to pnpm directly. - -## npm - -```bash -rtk npm [args...] # e.g. rtk npm run build -``` - -Filters npm boilerplate (progress bars, headers, audit notices). - -## npx — smart routing - -```bash -rtk npx [args...] -``` - -Routes automatically to specialized filters: - -- `rtk npx tsc` → tsc filter -- `rtk npx eslint` → lint filter -- `rtk npx prisma` → prisma filter -- Other → passthrough filter - -## Prisma - -| Command | Description | -|---------|-------------| -| `rtk prisma generate` | Client generation (removes ASCII art) | -| `rtk prisma migrate dev [--name N]` | Create and apply a migration | -| `rtk prisma migrate status` | Migration status | -| `rtk prisma migrate deploy` | Deploy to production | -| `rtk prisma db-push` | Push schema | - -## Universal format detector - -```bash -rtk format [args...] -``` - -Auto-detects the project formatter (prettier, black, ruff format, rustfmt) and applies a unified compact filter. 
diff --git a/docs/guide/commands/python.md b/docs/guide/commands/python.md deleted file mode 100644 index 4444dcc26..000000000 --- a/docs/guide/commands/python.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Python -description: RTK filters for pytest, ruff, mypy, and pip -sidebar: - order: 5 ---- - -# Python - -RTK covers the core Python toolchain: testing, linting/formatting, type checking, and package management. - -## pytest — 90% savings - -```bash -rtk pytest [args...] -``` - -Shows failures only. On success, shows a compact summary. - -## ruff — 80% savings - -```bash -rtk ruff check [args...] -rtk ruff format --check [args...] -``` - -Compressed JSON output grouped by file and rule. - -## mypy — type errors grouped by file - -```bash -rtk mypy [args...] -``` - -Groups type errors by file and error code, similar to the tsc filter. - -## pip / uv - -```bash -rtk pip list # package list -rtk pip outdated # outdated packages -rtk pip install # installation -``` - -Auto-detects `uv` if available and uses it instead of pip. - -## deps — project overview - -```bash -rtk deps [path] # default: current directory -``` - -Compact summary of project dependencies. Auto-detects `Cargo.toml`, `package.json`, `pyproject.toml`, `go.mod`, `Gemfile`, and others. - -**Savings:** ~70% diff --git a/docs/guide/commands/ruby.md b/docs/guide/commands/ruby.md deleted file mode 100644 index 5e1a4ed28..000000000 --- a/docs/guide/commands/ruby.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Ruby -description: RTK filters for rake, rspec, and rubocop -sidebar: - order: 7 ---- - -# Ruby - -RTK covers Ruby's core development tools: build tasks, test output, and linting. - -## rspec — 60-90% savings - -```bash -rtk rspec [args...] -``` - -Shows failures only. On success, compact summary. - -## rubocop — 80%+ savings - -```bash -rtk rubocop [args...] -``` - -Groups violations by cop and file. - -## rake — 60-80% savings - -```bash -rtk rake [args...] 
-rtk rake test -rtk rake spec -``` - -Filters task execution noise, keeps errors and final result. - -## Passthrough - -Unrecognized rake tasks pass through to rake directly: - -```bash -rtk rake db:migrate # passes through unchanged -rtk rake assets:precompile -``` diff --git a/docs/guide/commands/utilities.md b/docs/guide/commands/utilities.md deleted file mode 100644 index 02ff3dc31..000000000 --- a/docs/guide/commands/utilities.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Utilities -description: RTK utility commands — proxy passthrough, analytics, global flags -sidebar: - order: 12 ---- - -# Utilities - -Utility commands that apply across all ecosystems. - -## proxy — passthrough with tracking - -```bash -rtk proxy -``` - -Runs any command without filtering but records it in the token savings database. Useful when you need raw output but still want usage tracked. - -```bash -rtk proxy git log --oneline -20 # full git log, no filtering -rtk proxy npm install express # raw npm output -rtk proxy curl https://api.example.com/data -``` - -All proxy commands appear in `rtk gain --history` with 0% savings (input = output). 
- -## Global flags - -These flags apply to every RTK command: - -| Flag | Description | -|------|-------------| -| `-v` | Debug messages | -| `-vv` | Show command being executed | -| `-vvv` | Show raw output before filtering | -| `-u` / `--ultra-compact` | Maximum compression (ASCII icons, inline format) | - -**Verbosity example:** -```bash -rtk git status -vvv -# shows the raw git status output before RTK filters it -``` - -**Ultra-compact example:** -```bash -rtk git push -u -# output: ✓ main (vs "ok main" in normal mode) -``` - -## RTK_DISABLED — per-command override - -```bash -RTK_DISABLED=1 git status # runs raw git status, no rewrite -``` - -## Passthrough behavior - -Any command RTK doesn't recognize is executed unchanged and the output passes through: - -```bash -rtk make install # runs make install verbatim -rtk terraform plan # runs terraform plan verbatim (unless a TOML filter matches) -``` - -The exit code from the underlying command is always preserved. diff --git a/docs/guide/filters/creating-filters.md b/docs/guide/filters/creating-filters.md deleted file mode 100644 index 2203d3a8d..000000000 --- a/docs/guide/filters/creating-filters.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Creating Filters -description: Write TOML filters to compress output from any command RTK doesn't cover yet -sidebar: - order: 2 ---- - -# Creating Filters - -TOML filters let you add RTK support for any command without writing Rust. They work well for tools with predictable, line-by-line text output where regex filtering achieves 60%+ savings. 
- -## When to use a TOML filter - -TOML works well for: -- Install/update logs (brew, composer, poetry) — strip `Using ...` / `Already installed` lines -- System monitoring (df, ps, systemctl) — keep essential rows, drop headers -- Simple linters (shellcheck, yamllint, hadolint) — strip context, keep findings -- Infra tools (terraform plan, helm, rsync) — strip progress, keep summary - -For commands with structured JSON output, state machine parsing, or complex routing, a Rust module is needed instead. - -## Create a project-local filter - -Create `.rtk/filters.toml` in your project root: - -```toml -[filters.my-tool] -description = "Strip progress noise from my-tool output" -match_command = "^my-tool\\b" -strip_ansi = true -strip_lines_matching = [ - "^Loading", - "^Scanning", - "^\\s*$", -] -max_lines = 50 -on_empty = "my-tool: nothing to report" - -[[tests.my-tool]] -name = "strips progress lines" -input = "Loading plugins...\nScan complete: 3 issues\nWarning: foo at line 42" -expected = "Scan complete: 3 issues\nWarning: foo at line 42" -``` - -Verify: - -```bash -rtk verify -``` - -## Filter fields reference - -| Field | Type | Description | -|-------|------|-------------| -| `description` | string | Human-readable description | -| `match_command` | regex | Matched against full command string | -| `strip_ansi` | bool | Strip ANSI escape codes first | -| `strip_lines_matching` | regex[] | Drop lines matching any of these | -| `keep_lines_matching` | regex[] | Keep only lines matching at least one | -| `replace` | array | Regex substitutions (`{ pattern, replacement }`) | -| `match_output` | array | Short-circuit rules (`{ pattern, message }`) | -| `truncate_lines_at` | int | Truncate lines longer than N chars | -| `max_lines` | int | Keep only the first N lines | -| `tail_lines` | int | Keep only the last N lines | -| `on_empty` | string | Message when output is empty after filtering | - -## Naming convention - -Use the command name as the filter key: 
`terraform-plan`, `docker-inspect`, `mix-compile`. For commands with subcommands, prefer `cmd-subcommand` over grouping multiple filters together. - -## Filter lookup order - -Project filters override user-global filters, which override built-ins: - -``` -1. .rtk/filters.toml (project-local — highest priority) -2. ~/.config/rtk/filters.toml (user-global) -3. BUILTIN_TOML (embedded in binary — lowest priority) -``` - -A project filter with the same key as a built-in shadows the built-in with a warning: - -``` -[rtk] warning: filter 'make' is shadowing a built-in filter -``` - -## Writing inline tests - -Always add at least one `[[tests.my-tool]]` entry. Tests run via `rtk verify` and also during `cargo test` (for built-in filters). - -```toml -[[tests.my-tool]] -name = "normal output" -input = "Progress: 10%\nDone. 3 findings." -expected = "Done. 3 findings." - -[[tests.my-tool]] -name = "empty output" -input = "Progress: 10%\nProgress: 100%" -expected = "my-tool: nothing to report" -``` - -## Contributing a filter upstream - -To add a filter to RTK's built-in set, see the [Contributing guide](../../reference/contributing/guide.md) for the full checklist (register in `discover/rules.rs`, update filter count in tests, write fixture). diff --git a/docs/guide/filters/using-filters.md b/docs/guide/filters/using-filters.md deleted file mode 100644 index a23bd80d9..000000000 --- a/docs/guide/filters/using-filters.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Using Filters -description: How RTK filters work, the lookup priority, and how to create project-local filters -sidebar: - order: 1 ---- - -# Using Filters - -RTK filters are TOML files that define how a command's output should be compressed. They are the core mechanism behind every `rtk ` savings number. - -## Filter lookup priority - -When you run `rtk my-tool args`, RTK checks three locations in order: - -``` -1. .rtk/filters.toml (project-local) -2. 
~/.config/rtk/filters.toml (user-global) - macOS alt: ~/Library/Application Support/rtk/filters.toml -3. BUILTIN_TOML (embedded in binary) -``` - -First match wins. A project filter with the same name as a built-in shadows the built-in and triggers a warning: - -``` -[rtk] warning: filter 'make' is shadowing a built-in filter -``` - -## The 8-stage filter pipeline - -Every matched filter runs output through this pipeline: - -``` -strip_ansi - -> replace - -> match_output (short-circuit: if pattern matches, emit message and stop) - -> strip/keep_lines - -> truncate_lines_at - -> tail_lines - -> max_lines - -> on_empty (if output is empty after all stages, emit this message) -``` - -Stages you don't configure are skipped. - -## Creating a project-local filter - -Create `.rtk/filters.toml` in your project root: - -```toml -[[filters]] -name = "my-tool" -match_command = "^my-tool\\b" - -strip_lines = [ - "^Loading", - "^Scanning", -] - -max_lines = 50 -on_empty = "my-tool: nothing to do" - -[[tests.my-tool]] -input = "Loading plugins...\nScan complete: 3 issues\nWarning: foo at line 42" -expected = "Scan complete: 3 issues\nWarning: foo at line 42" -``` - -Verify it works: - -```bash -rtk verify -``` - -## Filter fields reference - -| Field | Type | Description | -|-------|------|-------------| -| `name` | string | Unique identifier (used in logs and `rtk verify`) | -| `match_command` | regex | Pattern matched against the full command string | -| `strip_lines` | regex[] | Remove lines matching any of these patterns | -| `keep_lines` | regex[] | Keep only lines matching any of these patterns | -| `replace` | `[[from, to]]` | String replacements applied before line filtering | -| `match_output` | regex | If matched, emit `on_match` and stop pipeline | -| `on_match` | string | Message emitted when `match_output` matches | -| `truncate_lines_at` | number | Truncate lines longer than N characters | -| `tail_lines` | number | Keep only the last N lines | -| `max_lines` | 
number | Keep only the first N lines after all other stages | -| `on_empty` | string | Message emitted when pipeline produces empty output | - -## How built-in filters are compiled - -Built-in filters live in `src/filters/*.toml` in the RTK source. At build time, `build.rs` concatenates all TOML files alphabetically and embeds the result in the binary as a constant. No external files are needed at runtime. - -This means: -- Built-in filters have zero filesystem overhead -- Project filters override built-ins by name -- New built-in filters require a new RTK release - -## Verifying filters - -```bash -rtk verify -``` - -Runs every `[[tests.*]]` entry in all filter files and reports pass/fail. Use this to validate your custom filters before committing. - -## Flow diagram - -```mermaid -flowchart LR - CMD["rtk my-tool args"] --> P1 - P1{"1. .rtk/filters.toml\n(project-local)"} - P1 -->|"match"| WIN["apply filter"] - P1 -->|"no match"| P2 - P2{"2. ~/.config/rtk/filters.toml\n(user-global)"} - P2 -->|"match"| WIN - P2 -->|"no match"| P3 - P3{"3. BUILTIN_TOML\n(binary)"} - P3 -->|"match"| WIN - P3 -->|"no match"| P4[["exec raw\n(passthrough)"]] -``` diff --git a/docs/guide/what-rtk-covers.md b/docs/guide/what-rtk-covers.md new file mode 100644 index 000000000..3e3f2a46a --- /dev/null +++ b/docs/guide/what-rtk-covers.md @@ -0,0 +1,133 @@ +--- +title: What RTK Optimizes +description: Commands and ecosystems automatically optimized by RTK with typical token savings +sidebar: + order: 2 +--- + +# What RTK Optimizes + +Once RTK is installed with a hook, these commands are automatically intercepted and filtered. You run them normally — the hook rewrites them transparently before execution. + +60+ commands across 9 ecosystems. Typical savings: 60-99%. 
+ +## Git + +| Command | Savings | What changes | +|---------|---------|--------------| +| `git status` | 75-93% | Compact stat format, grouped by state | +| `git log` | 80-92% | Hash + author + subject only | +| `git diff` | 70% | Context reduced, headers stripped | +| `git show` | 70% | Same as diff | +| `git stash list` | 75% | Compact one-line per entry | + +## GitHub CLI + +| Command | Savings | What changes | +|---------|---------|--------------| +| `gh pr view` | 87% | Removes ASCII art and verbose metadata | +| `gh pr checks` | 79% | Status + name only, failures highlighted | +| `gh run list` | 82% | Compact workflow run summary | +| `gh issue view` | 80% | Body only, no decoration | + +## Graphite (Stacked PRs) + +| Command | Savings | What changes | +|---------|---------|--------------| +| `gt log` | 75% | Stack summary only | +| `gt status` | 70% | Current branch context | + +## Cargo / Rust + +| Command | Savings | What changes | +|---------|---------|--------------| +| `cargo test` | 90% | Failures only, passed tests suppressed | +| `cargo nextest` | 90% | Same as test | +| `cargo build` | 80% | Errors and warnings only | +| `cargo check` | 80% | Errors and warnings only | +| `cargo clippy` | 80% | Lint warnings grouped by file | + +## JavaScript / TypeScript + +| Command | Savings | What changes | +|---------|---------|--------------| +| `vitest run` | 94-99% | Failures only | +| `tsc` | 75% | Type errors grouped by file | +| `eslint` | 84% | Violations grouped by rule | +| `pnpm list` | 70-90% | Compact dependency tree | +| `pnpm outdated` | 70% | Package + current + latest only | +| `next build` | 80% | Route summary + errors only | +| `prisma migrate` | 75% | Migration status only | +| `playwright test` | 90% | Failures + trace links only | + +## Python + +| Command | Savings | What changes | +|---------|---------|--------------| +| `pytest` | 80-90% | Failures only | +| `ruff check` | 75% | Violations grouped by file | +| `mypy` | 75% | Type 
errors grouped by file | +| `pip install` | 70% | Installed packages only, progress stripped | + +## Go + +| Command | Savings | What changes | +|---------|---------|--------------| +| `go test` | 80-90% | Failures only | +| `golangci-lint run` | 75% | Violations grouped by file | +| `go build` | 75% | Errors only | + +## Ruby + +| Command | Savings | What changes | +|---------|---------|--------------| +| `rspec` | 80-90% | Failures only | +| `rubocop` | 75% | Offenses grouped by file | +| `rake` | 70% | Task output, build errors highlighted | + +## .NET + +| Command | Savings | What changes | +|---------|---------|--------------| +| `dotnet build` | 80% | Errors and warnings only | +| `dotnet test` | 85-90% | Failures only | +| `dotnet format` | 75% | Changed files only | + +## Docker / Kubernetes + +| Command | Savings | What changes | +|---------|---------|--------------| +| `docker ps` | 65% | Essential columns (name, image, status, port) | +| `docker images` | 60% | Name + tag + size only | +| `docker logs` | 70% | Deduplicated, last N lines | +| `docker compose up` | 75% | Service status, errors highlighted | +| `kubectl get pods` | 65% | Name + status + restarts only | +| `kubectl logs` | 70% | Deduplicated entries | + +## Files and Search + +| Command | Savings | What changes | +|---------|---------|--------------| +| `ls` | 80% | Tree format with file counts | +| `find` | 75% | Tree format | +| `grep` | 70% | Truncated lines, grouped by file | +| `diff` | 65% | Context reduced | +| `wc` | 60% | Compact counts | + +## Cloud and Data + +| Command | Savings | What changes | +|---------|---------|--------------| +| `aws` | 70% | JSON condensed, relevant fields only | +| `psql` | 65% | Query results without decoration | +| `curl` | 60% | Response body only, headers stripped | + +## Commands that are not rewritten + +If a command isn't in the list above, RTK runs it through passthrough — the output reaches the LLM unchanged. 
You can explicitly track unsupported commands: + +```bash +rtk proxy make install # runs make install, tracks usage, no filtering +``` + +To check which commands were missed opportunities: `rtk discover`. diff --git a/docs/reference/contributing/coding-standards.md b/docs/reference/contributing/coding-standards.md deleted file mode 100644 index 0670d563c..000000000 --- a/docs/reference/contributing/coding-standards.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Coding Standards -description: Rust patterns, error handling rules, and anti-patterns for RTK development -sidebar: - order: 3 ---- - -# Coding Standards - -RTK-specific Rust constraints. These override general Rust conventions. - -## Non-negotiable rules - -1. **No async** — Zero `tokio`, `async-std`, `futures`. Single-threaded by design. Async adds 5-10ms startup. -2. **No `.unwrap()` in production** — Use `.context("description")?`. Tests: use `.expect("reason")`. -3. **Lazy regex** — `Regex::new()` inside a function recompiles on every call. Always `lazy_static!`. -4. **Fallback pattern** — If filter fails, execute raw command unchanged. Never block the user. -5. **Exit code propagation** — `std::process::exit(code)` if underlying command fails. - -## Error handling - -```rust -use anyhow::{Context, Result}; - -// ✅ Correct -fn read_config(path: &Path) -> Result<Config> { - let content = fs::read_to_string(path) - .with_context(|| format!("Failed to read config: {}", path.display()))?; - toml::from_str(&content).context("Failed to parse config TOML") -} - -// ❌ Wrong — no context -fn read_config(path: &Path) -> Result<Config> { - let content = fs::read_to_string(path)?; - Ok(toml::from_str(&content)?) 
-} -``` - -## Fallback pattern (mandatory for all filters) - -```rust -pub fn run(args: MyArgs) -> Result<()> { - let output = execute_command("mycmd", &args.to_cmd_args()) - .context("Failed to execute mycmd")?; - - let filtered = filter_output(&output.stdout) - .unwrap_or_else(|e| { - eprintln!("rtk: filter warning: {}", e); - output.stdout.clone() // passthrough on failure - }); - - tracking::record("mycmd", &output.stdout, &filtered)?; - print!("{}", filtered); - - if !output.status.success() { - std::process::exit(output.status.code().unwrap_or(1)); - } - Ok(()) -} -``` - -## Regex — always lazy_static - -```rust -use lazy_static::lazy_static; -use regex::Regex; - -lazy_static! { - static ref ERROR_RE: Regex = Regex::new(r"^error\[").unwrap(); - static ref HASH_RE: Regex = Regex::new(r"^[0-9a-f]{7,40}").unwrap(); -} - -// ✅ Compiled once at first use -fn is_error_line(line: &str) -> bool { - ERROR_RE.is_match(line) -} - -// ❌ Recompiles on every call -fn is_error_line(line: &str) -> bool { - let re = Regex::new(r"^error\[").unwrap(); - re.is_match(line) -} -``` - -Note: `lazy_static!` with `.unwrap()` for initialization is the established RTK pattern — acceptable because a bad regex literal is a programming error caught at first use. - -## Module structure - -Every `*_cmd.rs` follows this pattern: - -```rust -// 1. Imports -use anyhow::{Context, Result}; -use lazy_static::lazy_static; -use regex::Regex; - -// 2. Args struct -pub struct MyArgs { ... } - -// 3. Lazy regexes -lazy_static! { static ref MY_RE: Regex = ...; } - -// 4. Public entry point -pub fn run(args: MyArgs) -> Result<()> { ... } - -// 5. Private filter functions -fn filter_output(input: &str) -> Result<String> { ... } - -// 6. Tests (always present) -#[cfg(test)] -mod tests { - use super::*; - fn count_tokens(s: &str) -> usize { s.split_whitespace().count() } - // snapshot tests, savings tests... 
-} -``` - -## Anti-patterns - -| Pattern | Problem | Fix | -|---------|---------|-----| -| `Regex::new()` in function | Recompiles every call | `lazy_static!` | -| `.unwrap()` in production | Panic breaks user workflow | `.context()?` | -| `tokio::main` or `async fn` | +5-10ms startup | Blocking I/O only | -| `Err(_) => {}` | User gets no output | Log warning + fallback | -| `println!` in filter path | Debug artifact in output | Use `eprintln!` | -| Early return without exit code | CI thinks command succeeded | `std::process::exit(code)` | -| `.clone()` of large strings in hot path | Extra allocation | Borrow with `&str` | diff --git a/docs/reference/contributing/guide.md b/docs/reference/contributing/guide.md deleted file mode 100644 index e37a6a48c..000000000 --- a/docs/reference/contributing/guide.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Contributing Guide -description: How to contribute to RTK — design philosophy, PR process, testing, and documentation -sidebar: - order: 1 ---- - -# Contributing Guide - -## Design philosophy - -Four principles guide every RTK design decision. - -### Correctness over savings - -When a user explicitly requests detailed output via flags (e.g., `--nocapture`, `--verbose`, `-la`), respect that intent. Filters should be flag-aware: default output gets compressed, explicit verbose flags pass through more content. - -> `rtk cargo test` shows failures only (90% savings). `rtk cargo test -- --nocapture` preserves all output. - -### Transparency - -RTK's output must be a valid, useful subset of the original tool's output. The filtered version should be indistinguishable from "a shorter version of the real command." Don't invent new formats or add RTK-specific headers in default output. - -### Never block - -If a filter fails, fall back to raw output. RTK must never prevent a command from executing. Every filter needs a fallback path. Every hook must handle malformed input gracefully and exit 0. 
- -### Zero overhead - -`<10ms` startup. No async runtime. No config file I/O on the critical path. Use `lazy_static!` for all regex. No network calls in the hot path. Benchmark before/after with `hyperfine`. - -## What belongs in RTK? - -**In scope:** Commands that produce text output (typically 100+ tokens) compressible 60%+ without losing essential information for the LLM. - -- Test runners (vitest, pytest, cargo test, go test) -- Linters and type checkers (eslint, ruff, tsc, mypy) -- Build tools (cargo build, dotnet build, make, next build) -- VCS operations (git status/log/diff, gh pr/issue) -- Package managers (pnpm, pip, cargo install) -- File operations (ls, tree, grep, find) -- Infrastructure tools with text output (docker, kubectl, terraform) - -**Out of scope:** Interactive TUIs, binary output, trivial commands, non-text output. - -## TOML filter vs Rust module - -| Use **TOML filter** when | Use **Rust module** when | -|--------------------------|--------------------------| -| Plain text with predictable line structure | Structured output (JSON, NDJSON) | -| Regex line filtering achieves 60%+ savings | State machine parsing needed | -| No CLI flag injection needed | Must inject flags like `--format json` | -| No cross-command routing | Routes to other commands | - -## Branch naming - -| Prefix | Semver | When to use | -|--------|--------|-------------| -| `fix/` | Patch | Bug fixes, filter corrections | -| `feat/` | Minor | New filters, new command support | -| `chore/` | Major | Breaking changes, API changes | - -Examples: `fix/git-log-drops-merge-commits`, `feat/kubectl-pod-filter` - -## PR process - -1. Branch from `develop` -2. Make changes + add tests + update docs -3. Run pre-commit gate: `cargo fmt --all --check && cargo clippy --all-targets && cargo test` -4. Open PR targeting `develop` -5. Sign CLA (CLA Assistant will prompt on first PR) -6. Address review feedback -7. 
Maintainer merges to `develop` → eventual release to `master` - -## Testing requirements - -Every PR must include tests. Follow TDD (Red-Green-Refactor): write failing test first. - -| Type | Location | Runner | -|------|----------|--------| -| Unit tests | `#[cfg(test)] mod tests` in each module | `cargo test` | -| Snapshot tests | `assert_snapshot!()` via `insta` | `cargo test` + `cargo insta review` | -| Smoke tests | `scripts/test-all.sh` | `bash scripts/test-all.sh` | -| Integration tests | `#[ignore]` tests | `cargo test --ignored` | - -**PR testing checklist:** -- [ ] Unit tests added/updated -- [ ] Snapshot tests reviewed (`cargo insta review`) -- [ ] Token savings ≥60% verified -- [ ] `cargo fmt --all --check && cargo clippy --all-targets && cargo test` passes - -## Documentation requirements - -Every filter addition requires: -- Update `docs/guide/commands/.md` with the new command -- Update `CHANGELOG.md` under `[Unreleased]` - -For the full step-by-step checklist for adding a new command filter, see [src/cmds/README.md](https://github.com/rtk-ai/rtk/blob/master/src/cmds/README.md#adding-a-new-command-filter). - -## Contributor License Agreement - -All contributions require signing the CLA. CLA Assistant will post a comment on your first PR with a link to sign. You only need to sign once. diff --git a/docs/reference/contributing/security.md b/docs/reference/contributing/security.md deleted file mode 100644 index 2b7299e22..000000000 --- a/docs/reference/contributing/security.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Security Policy -description: How to report vulnerabilities, the PR security review process, and dangerous patterns to avoid -sidebar: - order: 2 ---- - -# Security Policy - -## Reporting a vulnerability - -Report security issues privately — do not open public GitHub issues for vulnerabilities. 
- -- **Email**: security@rtk-ai.dev (or create a private GitHub security advisory) -- **Response time**: Acknowledgment within 48 hours -- **Disclosure**: 90-day embargo, responsible disclosure - -## Automated security checks - -Every PR triggers `security-check.yml`: - -1. **Dependency audit** (`cargo audit`) — detects known CVEs -2. **Critical files alert** — flags modifications to high-risk files -3. **Dangerous pattern scan** — regex detection of shell execution, env manipulation, network ops, unsafe code, `.unwrap()` in production -4. **Clippy security lints** - -## High-risk files - -These files require enhanced review (2 maintainers for Tier 1): - -**Tier 1 — Shell execution & system interaction:** -- `src/runner.rs` — shell command execution engine -- `src/tracking.rs` — SQLite database operations -- `src/discover/registry.rs` — command rewrite logic -- `hooks/rtk-rewrite.sh` — intercepts all Claude Code commands - -**Tier 2 — Input validation:** -- `src/pnpm_cmd.rs` — package name validation -- `src/container.rs` — Docker/container operations - -**Tier 3 — Supply chain & CI/CD:** -- `Cargo.toml` — dependency manifest -- `.github/workflows/*.yml` — CI/CD pipelines - -## Dangerous patterns - -| Pattern | Risk | -|---------|------| -| `Command::new("sh")` | Shell injection | -| `.env("LD_PRELOAD")` | Library hijacking | -| `reqwest::`, `std::net::` | Data exfiltration | -| `unsafe {` | Memory safety bypass | -| `.unwrap()` in `src/` | DoS via panic | -| `SystemTime::now() > ...` | Logic bombs | - -**Avoid — shell injection:** -```rust -// ❌ Never do this -Command::new("sh").arg("-c").arg(format!("echo {}", user_input)).output(); - -// ✅ Direct binary execution -Command::new("echo").arg(user_input).output(); -``` - -## Dependency criteria for new crates - -- Downloads: >10,000 on crates.io -- Maintainer: verified GitHub profile + track record -- License: MIT or Apache-2.0 -- Activity: commits within 6 months -- No typosquatting (verify against similar crate 
names) - -## Disclosure timeline - -| Day | Action | -|-----|--------| -| 0 | Acknowledgment to reporter | -| 7 | Severity assessment | -| 14 | Patch development | -| 30 | Patch released + CVE filed (if applicable) | -| 90 | Public disclosure | - -Critical vulnerabilities (RCE, data exfiltration) may be fast-tracked. diff --git a/docs/reference/contributing/testing.md b/docs/reference/contributing/testing.md deleted file mode 100644 index 54054f334..000000000 --- a/docs/reference/contributing/testing.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: Testing Strategy -description: Snapshot tests, token accuracy tests, cross-platform tests, and performance benchmarks -sidebar: - order: 4 ---- - -# Testing Strategy - -## Snapshot tests (critical) - -Use the `insta` crate for output validation. This is the primary testing strategy for RTK filters. - -```rust -use insta::assert_snapshot; - -#[test] -fn test_git_log_output() { - let input = include_str!("../tests/fixtures/git_log_raw.txt"); - let output = filter_git_log(input); - assert_snapshot!(output); -} -``` - -**Workflow:** -1. Write test with `assert_snapshot!(output)` -2. `cargo test` (creates new snapshot on first run) -3. `cargo insta review` (interactive review — press `a` to accept) -4. Snapshot saved in `src/cmds//snapshots/` - -## Token accuracy tests (critical) - -All filters must verify 60-90% savings claims with real fixtures. 
- -```rust -fn count_tokens(text: &str) -> usize { - text.split_whitespace().count() -} - -#[test] -fn test_git_log_savings() { - let input = include_str!("../tests/fixtures/git_log_raw.txt"); - let output = filter_git_log(input); - let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(input) as f64 * 100.0); - assert!(savings >= 60.0, "Expected ≥60% savings, got {:.1}%", savings); -} -``` - -**Savings targets:** - -| Filter | Minimum | Mechanism | -|--------|---------|-----------| -| `git log` | 80% | Condense commits to hash + message | -| `cargo test` | 90% | Show failures only | -| `gh pr view` | 87% | Remove ASCII art + verbose metadata | -| `pnpm list` | 70% | Compact dependency tree | -| `docker ps` | 60% | Essential fields only | - -## Creating fixtures - -Use real command output, not synthetic data: - -```bash -git log -20 > tests/fixtures/git_log_raw.txt -cargo test 2>&1 > tests/fixtures/cargo_test_raw.txt -gh pr view 123 > tests/fixtures/gh_pr_view_raw.txt -``` - -## Cross-platform tests - -RTK must work on macOS (zsh), Linux (bash), and Windows (PowerShell). Test shell escaping with `cfg` guards: - -```rust -#[test] -fn test_shell_escaping() { - let escaped = escape_for_shell("test"); - #[cfg(target_os = "windows")] - assert_eq!(escaped, "\"test\""); - #[cfg(not(target_os = "windows"))] - assert_eq!(escaped, "test"); -} -``` - -## Performance tests - -RTK targets `<10ms` startup and `<5MB` memory. 
- -```bash -# Benchmark before/after changes -hyperfine 'rtk git log -10' --warmup 3 - -# Memory usage (macOS) -/usr/bin/time -l rtk git status -# "maximum resident set size" should be <5MB -``` - -## Integration tests - -Run against an installed binary with `#[ignore]`: - -```rust -#[test] -#[ignore] -fn test_real_git_log() { - let output = std::process::Command::new("rtk") - .args(&["git", "log", "-10"]) - .output() - .expect("Failed to run rtk"); - assert!(output.status.success()); - let stdout = String::from_utf8_lossy(&output.stdout); - assert!(stdout.len() < 5000, "Output too large, filter not working"); -} -``` - -Run with: `cargo test --ignored` - -## Test organization - -``` -src/cmds// - .rs # filter + embedded unit tests - snapshots/ # insta snapshots (auto-generated) -tests/ - common/mod.rs # count_tokens + shared helpers - fixtures/ # real command output (txt files) - integration_test.rs # #[ignore] end-to-end tests -``` - -## Pre-commit gate - -All three must pass before any commit: - -```bash -cargo fmt --all --check && cargo clippy --all-targets && cargo test -``` - -## Anti-patterns - -- **Don't** test with hardcoded synthetic strings — use real fixture files -- **Don't** skip cross-platform tests — use `cfg` guards -- **Don't** ignore savings drops below 60% — investigate and fix -- **Don't** commit without running `cargo insta review` diff --git a/docs/reference/index.md b/docs/reference/index.md deleted file mode 100644 index 4f8567b5d..000000000 --- a/docs/reference/index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Reference -description: Technical reference for RTK contributors, maintainers, and integrators -sidebar: - order: 1 ---- - -# Reference - -Technical documentation for people working on RTK internals or integrating it into their toolchain. 
- -## What's in this section - -- **Contributing** — How to contribute, coding standards, testing strategy, and security policy -- **Internals** — Command routing, filter pipeline, tracking system, and hook engine -- **TOML DSL** — Specification for custom filter files diff --git a/docs/reference/internals/command-routing.md b/docs/reference/internals/command-routing.md deleted file mode 100644 index 3656880f8..000000000 --- a/docs/reference/internals/command-routing.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Command Routing -description: How RTK parses, routes, and dispatches commands through the 6-phase execution lifecycle -sidebar: - order: 1 ---- - -# Command Routing - -## 6-phase execution lifecycle - -Every RTK command goes through six phases: - -### Phase 1: Parse - -Clap parses the CLI arguments into a typed `Commands` enum: - -``` -$ rtk git log --oneline -5 -v - -Commands::Git { args: ["log", "--oneline", "-5"], verbose: 1 } -``` - -### Phase 2: Route - -`main.rs` matches the enum variant and dispatches to the module: - -```rust -match cli.command { - Commands::Git { args, .. } => git::run(&args, verbose)?, - Commands::Cargo { args, .. } => cargo::run(&args, verbose)?, - Commands::Ls { args } => ls_cmd::run(&args)?, - // ... -} -``` - -### Phase 3: Execute - -The module spawns the underlying process: - -```rust -std::process::Command::new("git") - .args(["log", "--oneline", "-5"]) - .output()? -// stdout: "abc123 Fix bug\ndef456 Add feature\n..." -// exit_code: 0 -``` - -### Phase 4: Filter - -The module applies its filtering strategy to the captured output: - -```rust -git::format_git_output(stdout, "log", verbose) -// Strategy: Stats Extraction -// Filtered: "5 commits, +142/-89" (96% reduction) -``` - -### Phase 5: Print - -```rust -println!("{}", colored_output); -// eprintln! 
for debug output when verbose > 0 -``` - -### Phase 6: Track - -```rust -tracking::track( - original_cmd: "git log --oneline -5", - rtk_cmd: "rtk git log --oneline -5", - input: &raw_output, // 500 chars - output: &filtered, // 20 chars -) -// SQLite INSERT: input_tokens=125, output_tokens=5, savings_pct=96.0 -``` - -## Exit code preservation - -RTK always propagates the exit code from the underlying tool: - -```rust -if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprintln!("{}", stderr); - std::process::exit(output.status.code().unwrap_or(1)); -} -``` - -RTK exit codes: `0` = success, `1` = RTK internal error, `N` = preserved from underlying tool. - -## Verbosity levels - -| Flag | Behavior | -|------|----------| -| (none) | Compact output only | -| `-v` | + Debug messages (`eprintln!`) | -| `-vv` | + Command being executed | -| `-vvv` | + Raw output before filtering | - -## Ultra-compact mode - -`-u` / `--ultra-compact`: maximum compression — ASCII icons instead of words, single-line summaries. - -```bash -rtk git push -u # ✓ main (vs "ok main" normally) -``` - -## Module organization - -``` -src/ - main.rs ← Commands enum + routing - cmds/ - git/ ← git, gh, gt, diff - rust/ ← cargo, runner (err/test) - js/ ← pnpm, vitest, tsc, next, playwright, prisma, lint - python/ ← ruff, pytest, mypy, pip - go/ ← go, golangci-lint - dotnet/ ← dotnet, binlog - cloud/ ← aws, docker/kubectl, curl, wget, psql - system/ ← ls, read, grep, find, json, env, log, deps, summary - ruby/ ← rake, rspec, rubocop - core/ ← utils, tracking, tee, config, toml_filter, filter - hooks/ ← init, rewrite, hook_cmd, verify, integrity - analytics/ ← gain, cc_economics, ccusage - discover/ ← rtk discover, registry -``` - -Total: 64 modules (42 command + 22 infrastructure). 
diff --git a/docs/reference/internals/filter-pipeline.md b/docs/reference/internals/filter-pipeline.md deleted file mode 100644 index 0e52432ba..000000000 --- a/docs/reference/internals/filter-pipeline.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Filter Pipeline -description: The 12 filtering strategies RTK uses to compress command output -sidebar: - order: 2 ---- - -# Filter Pipeline - -## Filtering strategies - -RTK uses 12 distinct filtering strategies depending on the command type: - -| # | Strategy | Reduction | Used by | -|---|----------|-----------|---------| -| 1 | **Stats Extraction** | 90-99% | git status, git log, git diff, pnpm list | -| 2 | **Error Only** | 60-80% | runner (err mode), test failures | -| 3 | **Grouping by Pattern** | 80-90% | lint, tsc, grep (group by file/rule/code) | -| 4 | **Deduplication** | 70-85% | log_cmd (count occurrences) | -| 5 | **Structure Only** | 80-95% | json_cmd (keys + types, strip values) | -| 6 | **Code Filtering** | 0-90% | read, smart (language-aware, 3 levels) | -| 7 | **Failure Focus** | 94-99% | vitest, playwright, runner (test mode) | -| 8 | **Tree Compression** | 50-70% | ls (directory tree with counts) | -| 9 | **Progress Filtering** | 85-95% | wget, pnpm install (strip ANSI bars) | -| 10 | **JSON/Text Dual Mode** | 80%+ | ruff (JSON when available, text fallback) | -| 11 | **State Machine Parsing** | 90%+ | pytest (text state machine: name → result) | -| 12 | **NDJSON Streaming** | 90%+ | go test (line-by-line JSON event parsing) | - -## TOML filter pipeline (8 stages) - -TOML filters run output through this pipeline in order: - -``` -1. strip_ansi — remove terminal color codes -2. replace — regex substitutions -3. match_output — short-circuit: if pattern matches, emit message and stop -4. strip/keep_lines — keep or remove lines by regex -5. truncate_lines_at — truncate lines longer than N chars -6. tail_lines — keep only the last N lines -7. max_lines — keep only the first N lines -8. 
on_empty — if output is empty, emit this message -``` - -Stages not configured in the filter definition are skipped. First match wins in TOML filter lookup (project → user-global → built-in). - -## Code filtering levels (src/core/filter.rs) - -The `read` and `smart` commands use language-aware filtering with three levels: - -- **`none`**: Keep everything (0%) -- **`minimal`**: Strip comments and excessive blank lines (20-40%) -- **`aggressive`**: Keep signatures only, remove function bodies (60-90%) - -Supported languages: Rust, Python, JavaScript, TypeScript, Go, C, C++, Java, Ruby, Shell. - -## Savings by ecosystem - -``` -GIT (cmds/git/) 85-99% status, diff, log, gh, gt -JS/TS (cmds/js/) 70-99% lint, tsc, next, vitest, playwright, pnpm, prisma -PYTHON (cmds/python/) 70-90% ruff, pytest, mypy, pip -GO (cmds/go/) 75-90% go test, golangci-lint -RUBY (cmds/ruby/) 60-90% rake, rspec, rubocop -DOTNET (cmds/dotnet/) 70-85% dotnet build/test, binlog -CLOUD (cmds/cloud/) 60-80% aws, docker, kubectl, curl, wget, psql -SYSTEM (cmds/system/) 50-90% ls, read, grep, find, json, log, env -RUST (cmds/rust/) 60-99% cargo test/build/clippy, err -``` - -## Shared infrastructure (src/core/) - -| Module | Responsibility | -|--------|----------------| -| `utils.rs` | `strip_ansi`, `truncate`, `execute_command` | -| `filter.rs` | Language-aware code filtering engine | -| `toml_filter.rs` | TOML DSL filter engine (runtime) | -| `tracking.rs` | SQLite token metrics recording | -| `tee.rs` | Raw output recovery on failure | -| `config.rs` | `~/.config/rtk/config.toml` loading | -| `display_helpers.rs` | Terminal formatting helpers | -| `telemetry.rs` | Anonymous daily ping | diff --git a/docs/reference/internals/hook-engine.md b/docs/reference/internals/hook-engine.md deleted file mode 100644 index 0e958d902..000000000 --- a/docs/reference/internals/hook-engine.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Hook Engine -description: How RTK hooks intercept and rewrite agent commands 
before execution -sidebar: - order: 4 ---- - -# Hook Engine - -## Architecture - -RTK hooks are thin delegates: they parse agent-specific JSON, call `rtk rewrite` as a subprocess, and format the agent-specific response. All rewrite logic lives in the Rust binary (`src/discover/registry.rs`). - -``` -Agent runs "cargo test" - → Hook intercepts (PreToolUse / plugin event) - → Reads JSON input, extracts command string - → Calls `rtk rewrite "cargo test"` - → Registry matches pattern, returns "rtk cargo test" - → Hook formats agent-specific response - → Agent executes "rtk cargo test" - → Filtered output reaches LLM -``` - -## Integration tiers - -| Tier | Mechanism | Examples | -|------|-----------|---------| -| **Full hook** | Shell script or Rust binary, intercepts via agent hook API | Claude Code, Cursor, Copilot, Gemini | -| **Plugin** | TypeScript/JS in agent's plugin system | OpenCode | -| **Rules file** | Prompt-level instructions | Cline, Windsurf, Codex | - -## Rewrite registry - -`src/discover/registry.rs` holds 70+ rewrite patterns organized by category: - -| Category | Savings | -|----------|---------| -| Test runners (vitest, pytest, cargo test, go test, playwright) | 90-99% | -| Build tools (cargo build, npm, pnpm, dotnet, make) | 70-90% | -| VCS (git status/log/diff/show) | 70-80% | -| Language servers (tsc, mypy) | 80-83% | -| Linters (eslint, ruff, golangci-lint, biome) | 80-85% | -| Package managers (pip, cargo install, pnpm list) | 75-80% | -| File operations (ls, find, grep) | 60-75% | -| Infrastructure (docker, kubectl, aws) | 75-85% | - -## Compound command handling - -| Operator | Behavior | -|----------|----------| -| `&&`, `\|\|`, `;` | Both sides rewritten independently | -| `\|` (pipe) | Left side only (right side consumes output format) | -| `find`/`fd` in pipes | Never rewritten (incompatible with xargs/wc/grep) | - -Example: `cargo fmt --all && cargo test` → `rtk cargo fmt --all && rtk cargo test` - -## Exit code contract - -**Hooks must 
never block command execution.** All error paths must exit 0. A hook that exits non-zero prevents the user's command from running. - -Failure modes handled gracefully: -- RTK binary not found → warning to stderr, exit 0 -- Invalid JSON input → pass through unchanged -- RTK version too old (< 0.23.0) → warning + exit 0 -- `rtk rewrite` crashes → hook exits 0 - -## rtk init - -`src/hooks/init.rs` installs hook files for each supported agent: - -```bash -rtk init --global # Claude Code -rtk init --global --cursor # Cursor -rtk init --global --copilot # VS Code Copilot -rtk init --global --gemini # Gemini CLI -rtk init --global --opencode # OpenCode -rtk init --cline # Cline (project-local) -rtk init --windsurf # Windsurf (project-local) -rtk init --codex # Codex CLI -``` - -## rtk verify - -`src/hooks/verify_cmd.rs` runs inline TOML filter tests and checks hook integrity via SHA-256: - -```bash -rtk verify # all filters pass / fail with diff -rtk init --show # hook status per agent -``` - -## Override controls - -```bash -RTK_DISABLED=1 git status # skip rewrite for one command -``` - -```toml -# ~/.config/rtk/config.toml -[hooks] -exclude_commands = ["git rebase", "docker exec"] -``` diff --git a/docs/reference/internals/tracking-system.md b/docs/reference/internals/tracking-system.md deleted file mode 100644 index 076b3a660..000000000 --- a/docs/reference/internals/tracking-system.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Tracking System -description: How RTK records token savings in SQLite and exposes aggregation APIs -sidebar: - order: 3 ---- - -# Tracking System - -## Overview - -Every RTK command execution is recorded in a local SQLite database. This data powers `rtk gain` and `rtk cc-economics`. - -**Storage locations:** -- Linux: `~/.local/share/rtk/tracking.db` -- macOS: `~/Library/Application Support/rtk/tracking.db` -- Windows: `%APPDATA%\rtk\tracking.db` - -**Retention:** Records older than 90 days are automatically deleted on each write. 
- -## Data flow - -``` -rtk command execution - ↓ -TimedExecution::start() - ↓ -[command runs] - ↓ -TimedExecution::track(original_cmd, rtk_cmd, input, output) - ↓ -Tracker::record(original_cmd, rtk_cmd, input_tokens, output_tokens, exec_time_ms) - ↓ -SQLite INSERT - ↓ -Aggregation APIs (get_summary, get_all_days, etc.) - ↓ -rtk gain output -``` - -## Core API - -```rust -pub struct Tracker { - conn: Connection, // SQLite connection -} - -impl Tracker { - pub fn new() -> Result; - - pub fn record( - &self, - original_cmd: &str, // e.g., "ls -la" - rtk_cmd: &str, // e.g., "rtk ls" - input_tokens: usize, // estimate_tokens(raw_output) - output_tokens: usize, // estimate_tokens(filtered_output) - exec_time_ms: u64, - ) -> Result<()>; - - pub fn get_summary(&self) -> Result; - pub fn get_all_days(&self) -> Result>; - pub fn get_weekly(&self) -> Result>; - pub fn get_monthly(&self) -> Result>; -} -``` - -## Token estimation - -``` -estimate_tokens(text) = text.len() / 4 -``` - -~4 characters per token average. Accuracy: ±10% vs actual LLM tokenization. 
- -## Database schema - -```sql -CREATE TABLE commands ( - id INTEGER PRIMARY KEY, - timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, - original_cmd TEXT NOT NULL, - rtk_cmd TEXT NOT NULL, - input_tokens INTEGER NOT NULL, - output_tokens INTEGER NOT NULL, - saved_tokens INTEGER GENERATED ALWAYS AS (input_tokens - output_tokens), - savings_pct REAL GENERATED ALWAYS AS ( - CASE WHEN input_tokens > 0 - THEN (1.0 - CAST(output_tokens AS REAL) / input_tokens) * 100 - ELSE 0 END - ), - exec_time_ms INTEGER -); -``` - -## Reporting query - -```sql -SELECT - COUNT(*) as total_commands, - SUM(saved_tokens) as total_saved, - AVG(savings_pct) as avg_savings, - SUM(exec_time_ms) as total_time_ms -FROM commands -WHERE timestamp > datetime('now', '-90 days') -``` - -## Configuration - -```toml -[tracking] -enabled = true -history_days = 90 -database_path = "/custom/path/tracking.db" # optional override -``` - -Environment variable override: `RTK_DB_PATH=/custom/path.db` - -## Thread safety - -Single-threaded execution with `Mutex>` for future-proofing. No multi-threading currently used. diff --git a/docs/reference/toml-dsl/specification.md b/docs/reference/toml-dsl/specification.md deleted file mode 100644 index 9b4c065b4..000000000 --- a/docs/reference/toml-dsl/specification.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: TOML DSL Specification -description: Complete specification for RTK's TOML filter format -sidebar: - order: 1 ---- - -# TOML DSL Specification - -TOML filters are the declarative way to add RTK support for commands with line-by-line text output. They are embedded in the binary at build time and can also be used as project-local or user-global overrides. 
- -## File format - -```toml -[filters.my-tool] -description = "Short description of what this filter does" -match_command = "^my-tool\\b" # regex matched against full command string -strip_ansi = true # optional: strip ANSI codes first - -strip_lines_matching = [ # optional: drop lines matching any regex - "^\\s*$", - "^noise pattern", -] - -keep_lines_matching = [ # optional: keep only matching lines - "^error", - "^warning", -] - -max_lines = 40 # optional: keep only first N lines after filtering -tail_lines = 20 # optional: keep only last N lines -truncate_lines_at = 120 # optional: truncate lines longer than N chars -on_empty = "my-tool: nothing to do" # optional: message when output is empty - -[[tests.my-tool]] -name = "descriptive test name" -input = "raw command output here" -expected = "expected filtered output" -``` - -## Field reference - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `description` | string | yes | Human-readable description | -| `match_command` | regex | yes | Matched against the full command string | -| `strip_ansi` | bool | no | Strip ANSI escape codes before processing | -| `strip_lines_matching` | regex[] | no | Drop lines matching any of these patterns | -| `keep_lines_matching` | regex[] | no | Keep only lines matching at least one pattern | -| `replace` | array | no | Regex substitutions: `{ pattern, replacement }` | -| `match_output` | array | no | Short-circuit rules: `{ pattern, message }` | -| `truncate_lines_at` | int | no | Truncate lines longer than N characters | -| `max_lines` | int | no | Keep only the first N lines (after other stages) | -| `tail_lines` | int | no | Keep only the last N lines | -| `on_empty` | string | no | Message emitted when pipeline produces empty output | - -## Pipeline execution order - -When a filter matches, output passes through these stages in order: - -1. `strip_ansi` — remove terminal color codes -2. `replace` — regex substitutions -3. 
`match_output` — short-circuit: if pattern matches, emit message and stop -4. `strip_lines_matching` / `keep_lines_matching` -5. `truncate_lines_at` -6. `tail_lines` -7. `max_lines` -8. `on_empty` — if output is now empty, emit this message - -Stages not defined in the filter are skipped. - -## Inline tests - -Every filter must have at least one `[[tests.]]` entry. Tests run during `cargo test` and `rtk verify`. - -```toml -[[tests.my-tool]] -name = "strips progress lines" -input = "Loading...\nDone: 3 issues" -expected = "Done: 3 issues" - -[[tests.my-tool]] -name = "empty output fallback" -input = "Loading...\nProgress: 100%" -expected = "my-tool: nothing to do" -``` - -## Filter lookup priority - -``` -1. .rtk/filters.toml (project-local — highest priority) -2. ~/.config/rtk/filters.toml (user-global) -3. BUILTIN_TOML (embedded in binary — lowest priority) -``` - -First match wins. A project filter with the same name as a built-in triggers a warning: - -``` -[rtk] warning: filter 'make' is shadowing a built-in filter -``` - -## Built-in filter compilation - -Built-in filters live in `src/filters/*.toml`. At build time, `build.rs`: - -1. Lists all `*.toml` files alphabetically -2. Concatenates them into a single TOML blob -3. Validates syntax and checks for duplicate names (build fails on error) -4. Embeds the result in the binary via `include_str!(concat!(env!(OUT_DIR), "/builtin_filters.toml"))` - -New built-in filters require a new RTK release. Project and user filters take effect immediately without rebuilding. - -## Naming convention - -Use the command name as the filter key: `terraform-plan`, `docker-inspect`, `mix-compile`. For subcommands, prefer `cmd-subcommand` over grouping: `docker-ps.toml`, not `docker.toml` with multiple filters. - -## Regex syntax - -Patterns use Rust's `regex` crate (RE2-compatible). 
Backslashes must be doubled in TOML strings: - -```toml -match_command = "^my-tool\\b" # matches "my-tool" as a word -strip_lines_matching = ["^\\s*$"] # matches blank lines -``` diff --git a/docs/tracking.md b/docs/tracking.md deleted file mode 100644 index 82c12883d..000000000 --- a/docs/tracking.md +++ /dev/null @@ -1,583 +0,0 @@ -# RTK Tracking API Documentation - -Comprehensive documentation for RTK's token savings tracking system. - -## Table of Contents - -- [Overview](#overview) -- [Architecture](#architecture) -- [Public API](#public-api) -- [Usage Examples](#usage-examples) -- [Data Formats](#data-formats) -- [Integration Examples](#integration-examples) -- [Database Schema](#database-schema) - -## Overview - -RTK's tracking system records every command execution to provide analytics on token savings. The system: -- Stores command history in SQLite (~/.local/share/rtk/tracking.db) -- Tracks input/output tokens, savings percentage, and execution time -- Automatically cleans up records older than 90 days -- Provides aggregation APIs (daily/weekly/monthly) -- Exports to JSON/CSV for external integrations - -## Architecture - -### Data Flow - -``` -rtk command execution - ↓ -TimedExecution::start() - ↓ -[command runs] - ↓ -TimedExecution::track(original_cmd, rtk_cmd, input, output) - ↓ -Tracker::record(original_cmd, rtk_cmd, input_tokens, output_tokens, exec_time_ms) - ↓ -SQLite database (~/.local/share/rtk/tracking.db) - ↓ -Aggregation APIs (get_summary, get_all_days, etc.) - ↓ -CLI output (rtk gain) or JSON/CSV export -``` - -### Storage Location - -- **Linux**: `~/.local/share/rtk/tracking.db` -- **macOS**: `~/Library/Application Support/rtk/tracking.db` -- **Windows**: `%APPDATA%\rtk\tracking.db` - -### Data Retention - -Records older than **90 days** are automatically deleted on each write operation to prevent unbounded database growth. 
- -## Public API - -### Core Types - -#### `Tracker` - -Main tracking interface for recording and querying command history. - -```rust -pub struct Tracker { - conn: Connection, // SQLite connection -} - -impl Tracker { - /// Create new tracker instance (opens/creates database) - pub fn new() -> Result; - - /// Record a command execution - pub fn record( - &self, - original_cmd: &str, // Standard command (e.g., "ls -la") - rtk_cmd: &str, // RTK command (e.g., "rtk ls") - input_tokens: usize, // Estimated input tokens - output_tokens: usize, // Actual output tokens - exec_time_ms: u64, // Execution time in milliseconds - ) -> Result<()>; - - /// Get overall summary statistics - pub fn get_summary(&self) -> Result; - - /// Get daily statistics (all days) - pub fn get_all_days(&self) -> Result>; - - /// Get weekly statistics (grouped by week) - pub fn get_by_week(&self) -> Result>; - - /// Get monthly statistics (grouped by month) - pub fn get_by_month(&self) -> Result>; - - /// Get recent command history (limit = max records) - pub fn get_recent(&self, limit: usize) -> Result>; -} -``` - -#### `GainSummary` - -Aggregated statistics across all recorded commands. - -```rust -pub struct GainSummary { - pub total_commands: usize, // Total commands recorded - pub total_input: usize, // Total input tokens - pub total_output: usize, // Total output tokens - pub total_saved: usize, // Total tokens saved - pub avg_savings_pct: f64, // Average savings percentage - pub total_time_ms: u64, // Total execution time (ms) - pub avg_time_ms: u64, // Average execution time (ms) - pub by_command: Vec<(String, usize, usize, f64, u64)>, // Top 10 commands - pub by_day: Vec<(String, usize)>, // Last 30 days -} -``` - -#### `DayStats` - -Daily statistics (Serializable for JSON export). 
-
-```rust
-#[derive(Debug, Serialize)]
-pub struct DayStats {
-    pub date: String,          // ISO date (YYYY-MM-DD)
-    pub commands: usize,       // Commands executed this day
-    pub input_tokens: usize,   // Total input tokens
-    pub output_tokens: usize,  // Total output tokens
-    pub saved_tokens: usize,   // Total tokens saved
-    pub savings_pct: f64,      // Savings percentage
-    pub total_time_ms: u64,    // Total execution time (ms)
-    pub avg_time_ms: u64,      // Average execution time (ms)
-}
-```
-
-#### `WeekStats`
-
-Weekly statistics (Serializable for JSON export).
-
-```rust
-#[derive(Debug, Serialize)]
-pub struct WeekStats {
-    pub week_start: String, // ISO date (YYYY-MM-DD)
-    pub week_end: String,   // ISO date (YYYY-MM-DD)
-    pub commands: usize,
-    pub input_tokens: usize,
-    pub output_tokens: usize,
-    pub saved_tokens: usize,
-    pub savings_pct: f64,
-    pub total_time_ms: u64,
-    pub avg_time_ms: u64,
-}
-```
-
-#### `MonthStats`
-
-Monthly statistics (Serializable for JSON export).
-
-```rust
-#[derive(Debug, Serialize)]
-pub struct MonthStats {
-    pub month: String, // YYYY-MM format
-    pub commands: usize,
-    pub input_tokens: usize,
-    pub output_tokens: usize,
-    pub saved_tokens: usize,
-    pub savings_pct: f64,
-    pub total_time_ms: u64,
-    pub avg_time_ms: u64,
-}
-```
-
-#### `CommandRecord`
-
-Individual command record from history.
-
-```rust
-pub struct CommandRecord {
-    pub timestamp: DateTime<Utc>, // UTC timestamp
-    pub rtk_cmd: String,          // RTK command used
-    pub saved_tokens: usize,      // Tokens saved
-    pub savings_pct: f64,         // Savings percentage
-}
-```
-
-#### `TimedExecution`
-
-Helper for timing command execution (preferred API).
- -```rust -pub struct TimedExecution { - start: Instant, -} - -impl TimedExecution { - /// Start timing a command execution - pub fn start() -> Self; - - /// Track command with elapsed time - pub fn track(&self, original_cmd: &str, rtk_cmd: &str, input: &str, output: &str); - - /// Track passthrough commands (timing-only, no token counting) - pub fn track_passthrough(&self, original_cmd: &str, rtk_cmd: &str); -} -``` - -### Utility Functions - -```rust -/// Estimate token count (~4 chars = 1 token) -pub fn estimate_tokens(text: &str) -> usize; - -/// Format OsString args for display -pub fn args_display(args: &[OsString]) -> String; - -/// Legacy tracking function (deprecated, use TimedExecution) -#[deprecated(note = "Use TimedExecution instead")] -pub fn track(original_cmd: &str, rtk_cmd: &str, input: &str, output: &str); -``` - -## Usage Examples - -### Basic Tracking - -```rust -use rtk::tracking::{TimedExecution, Tracker}; - -fn main() -> anyhow::Result<()> { - // Start timer - let timer = TimedExecution::start(); - - // Execute command - let input = execute_original_command()?; - let output = execute_rtk_command()?; - - // Track execution - timer.track("ls -la", "rtk ls", &input, &output); - - Ok(()) -} -``` - -### Querying Statistics - -```rust -use rtk::tracking::Tracker; - -fn main() -> anyhow::Result<()> { - let tracker = Tracker::new()?; - - // Get overall summary - let summary = tracker.get_summary()?; - println!("Total commands: {}", summary.total_commands); - println!("Total saved: {} tokens", summary.total_saved); - println!("Average savings: {:.1}%", summary.avg_savings_pct); - - // Get daily breakdown - let days = tracker.get_all_days()?; - for day in days.iter().take(7) { - println!("{}: {} commands, {} tokens saved", - day.date, day.commands, day.saved_tokens); - } - - // Get recent history - let recent = tracker.get_recent(10)?; - for cmd in recent { - println!("{}: {} saved {:.1}%", - cmd.timestamp, cmd.rtk_cmd, cmd.savings_pct); - } - - Ok(()) 
-} -``` - -### Passthrough Commands - -For commands that stream output or run interactively (no output capture): - -```rust -use rtk::tracking::TimedExecution; - -fn main() -> anyhow::Result<()> { - let timer = TimedExecution::start(); - - // Execute streaming command (e.g., git tag --list) - execute_streaming_command()?; - - // Track timing only (input_tokens=0, output_tokens=0) - timer.track_passthrough("git tag --list", "rtk git tag --list"); - - Ok(()) -} -``` - -## Data Formats - -### JSON Export Schema - -#### DayStats JSON - -```json -{ - "date": "2026-02-03", - "commands": 42, - "input_tokens": 15420, - "output_tokens": 3842, - "saved_tokens": 11578, - "savings_pct": 75.08, - "total_time_ms": 8450, - "avg_time_ms": 201 -} -``` - -#### WeekStats JSON - -```json -{ - "week_start": "2026-01-27", - "week_end": "2026-02-02", - "commands": 284, - "input_tokens": 98234, - "output_tokens": 19847, - "saved_tokens": 78387, - "savings_pct": 79.80, - "total_time_ms": 56780, - "avg_time_ms": 200 -} -``` - -#### MonthStats JSON - -```json -{ - "month": "2026-02", - "commands": 1247, - "input_tokens": 456789, - "output_tokens": 91358, - "saved_tokens": 365431, - "savings_pct": 80.00, - "total_time_ms": 249560, - "avg_time_ms": 200 -} -``` - -### CSV Export Schema - -```csv -date,commands,input_tokens,output_tokens,saved_tokens,savings_pct,total_time_ms,avg_time_ms -2026-02-03,42,15420,3842,11578,75.08,8450,201 -2026-02-02,38,14230,3557,10673,75.00,7600,200 -2026-02-01,45,16890,4223,12667,75.00,9000,200 -``` - -## Integration Examples - -### GitHub Actions - Track Savings in CI - -```yaml -# .github/workflows/track-rtk-savings.yml -name: Track RTK Savings - -on: - schedule: - - cron: '0 0 * * 1' # Weekly on Monday - workflow_dispatch: - -jobs: - track-savings: - runs-on: ubuntu-latest - steps: - - name: Install RTK - run: cargo install --git https://github.com/rtk-ai/rtk - - - name: Export weekly stats - run: | - rtk gain --weekly --format json > rtk-weekly.json - cat 
rtk-weekly.json - - - name: Upload artifact - uses: actions/upload-artifact@v3 - with: - name: rtk-metrics - path: rtk-weekly.json - - - name: Post to Slack - if: success() - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} - run: | - SAVINGS=$(jq -r '.[0].saved_tokens' rtk-weekly.json) - PCT=$(jq -r '.[0].savings_pct' rtk-weekly.json) - curl -X POST -H 'Content-type: application/json' \ - --data "{\"text\":\"📊 RTK Weekly: ${SAVINGS} tokens saved (${PCT}%)\"}" \ - $SLACK_WEBHOOK -``` - -### Custom Dashboard Script - -```python -#!/usr/bin/env python3 -""" -Export RTK metrics to Grafana/Datadog/etc. -""" -import json -import subprocess -from datetime import datetime - -def get_rtk_metrics(): - """Fetch RTK metrics as JSON.""" - result = subprocess.run( - ["rtk", "gain", "--all", "--format", "json"], - capture_output=True, - text=True - ) - return json.loads(result.stdout) - -def export_to_datadog(metrics): - """Send metrics to Datadog.""" - import datadog - - datadog.initialize(api_key="YOUR_API_KEY") - - for day in metrics.get("daily", []): - datadog.api.Metric.send( - metric="rtk.tokens_saved", - points=[(datetime.now().timestamp(), day["saved_tokens"])], - tags=[f"date:{day['date']}"] - ) - - datadog.api.Metric.send( - metric="rtk.savings_pct", - points=[(datetime.now().timestamp(), day["savings_pct"])], - tags=[f"date:{day['date']}"] - ) - -if __name__ == "__main__": - metrics = get_rtk_metrics() - export_to_datadog(metrics) - print(f"Exported {len(metrics.get('daily', []))} days to Datadog") -``` - -### Rust Integration (Using RTK as Library) - -```rust -// In your Cargo.toml -// [dependencies] -// rtk = { git = "https://github.com/rtk-ai/rtk" } - -use rtk::tracking::{Tracker, TimedExecution}; -use anyhow::Result; - -fn main() -> Result<()> { - // Track your own commands - let timer = TimedExecution::start(); - - let input = run_expensive_operation()?; - let output = run_optimized_operation()?; - - timer.track( - "expensive_operation", - 
"optimized_operation", - &input, - &output - ); - - // Query aggregated stats - let tracker = Tracker::new()?; - let summary = tracker.get_summary()?; - - println!("Total savings: {} tokens ({:.1}%)", - summary.total_saved, - summary.avg_savings_pct - ); - - // Export to JSON for external tools - let days = tracker.get_all_days()?; - let json = serde_json::to_string_pretty(&days)?; - std::fs::write("metrics.json", json)?; - - Ok(()) -} -``` - -## Database Schema - -### Table: `commands` - -```sql -CREATE TABLE commands ( - id INTEGER PRIMARY KEY, - timestamp TEXT NOT NULL, -- RFC3339 UTC timestamp - original_cmd TEXT NOT NULL, -- Original command (e.g., "ls -la") - rtk_cmd TEXT NOT NULL, -- RTK command (e.g., "rtk ls") - input_tokens INTEGER NOT NULL, -- Estimated input tokens - output_tokens INTEGER NOT NULL, -- Actual output tokens - saved_tokens INTEGER NOT NULL, -- input_tokens - output_tokens - savings_pct REAL NOT NULL, -- (saved/input) * 100 - exec_time_ms INTEGER DEFAULT 0 -- Execution time in milliseconds -); - -CREATE INDEX idx_timestamp ON commands(timestamp); -``` - -### Automatic Cleanup - -On every write operation (`Tracker::record`), records older than 90 days are deleted: - -```rust -fn cleanup_old(&self) -> Result<()> { - let cutoff = Utc::now() - chrono::Duration::days(90); - self.conn.execute( - "DELETE FROM commands WHERE timestamp < ?1", - params![cutoff.to_rfc3339()], - )?; - Ok(()) -} -``` - -### Migration Support - -The system automatically adds new columns if they don't exist (e.g., `exec_time_ms` was added later): - -```rust -// Safe migration on Tracker::new() -let _ = conn.execute( - "ALTER TABLE commands ADD COLUMN exec_time_ms INTEGER DEFAULT 0", - [], -); -``` - -## Performance Considerations - -- **SQLite WAL mode**: Not enabled (may add in future for concurrent writes) -- **Index on timestamp**: Enables fast date-range queries -- **Automatic cleanup**: Prevents database from growing unbounded -- **Token estimation**: ~4 chars = 1 
token (simple, fast approximation) -- **Aggregation queries**: Use SQL GROUP BY for efficient aggregation - -## Security & Privacy - -- **Local storage only**: Tracking database never leaves the machine -- **Telemetry enabled by default**: RTK sends a daily anonymous usage ping (version, OS, command counts, token savings). Device identity is a salted SHA-256 hash. Opt out with `RTK_TELEMETRY_DISABLED=1` or `[telemetry] enabled = false` in `~/.config/rtk/config.toml` -- **User control**: Users can delete `~/.local/share/rtk/tracking.db` anytime -- **90-day retention**: Old data automatically purged - -## Troubleshooting - -### Database locked error - -If you see "database is locked" errors: -- Ensure only one RTK process writes at a time -- Check file permissions on `~/.local/share/rtk/tracking.db` -- Delete and recreate: `rm ~/.local/share/rtk/tracking.db && rtk gain` - -### Missing exec_time_ms column - -Older databases may not have the `exec_time_ms` column. RTK automatically migrates on first use, but you can force it: - -```bash -sqlite3 ~/.local/share/rtk/tracking.db \ - "ALTER TABLE commands ADD COLUMN exec_time_ms INTEGER DEFAULT 0" -``` - -### Incorrect token counts - -Token estimation uses `~4 chars = 1 token`. This is approximate. For precise counts, integrate with your LLM's tokenizer API. 
- -## Future Enhancements - -Planned improvements (contributions welcome): - -- [ ] Export to Prometheus/OpenMetrics format -- [ ] Support for custom retention periods (not just 90 days) -- [ ] SQLite WAL mode for concurrent writes -- [ ] Per-project tracking (multiple databases) -- [ ] Integration with Claude API for precise token counts -- [ ] Web dashboard (localhost) for visualizing trends - -## See Also - -- [README.md](../README.md) - Main project documentation -- [COMMAND_AUDIT.md](../claudedocs/COMMAND_AUDIT.md) - List of all RTK commands -- [Rust docs](https://docs.rs/) - Run `cargo doc --open` for API docs From 6e55f8984c6efb1b23b187f704f159f7aaa6e5e0 Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Mon, 6 Apr 2026 11:15:57 +0200 Subject: [PATCH 087/204] =?UTF-8?q?docs:=20address=20Adrien's=20review=20?= =?UTF-8?q?=E2=80=94=20structure,=20missing=20content,=20agents?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Structure: - Move docs/README.md → .github/docs-pipeline-contract.md (interface contract is not user-facing content) - Rewrite docs/guide/index.md as a proper landing page with RTK intro, flow diagram, and navigation (replaces empty nav stub) - Move configuration.md → getting-started/configuration.md (sidebar order 4, after supported-agents per Adrien's suggestion) Missing content: - Add docs/guide/analytics/discover.md: rtk discover (missed savings analysis) and rtk session (adoption tracking across sessions) - Add uninstall instructions to installation.md (rtk init -g --uninstall, cargo uninstall, brew uninstall) - Add global flags section to what-rtk-covers.md (-u/--ultra-compact, -v/--verbose) - Add rtk smart command to what-rtk-covers.md Files section Agents: - Update supported-agents.md: 10 agents (was 9) - Add OpenClaw (TypeScript plugin, before_tool_call) - Add Mistral Vibe (planned, issue #800) - Fix GitHub Copilot: VS Code = transparent rewrite via PreToolUse, Copilot CLI = 
deny-with-suggestion (upstream limitation) Signed-off-by: Florian BRUNIAUX --- .github/docs-pipeline-contract.md | 57 +++++++++++++++++ docs/README.md | 64 ------------------- docs/guide/analytics/discover.md | 58 +++++++++++++++++ docs/guide/analytics/gain.md | 60 ++++++++++++++--- .../{ => getting-started}/configuration.md | 4 +- docs/guide/getting-started/installation.md | 8 +++ docs/guide/getting-started/quick-start.md | 36 +++-------- .../guide/getting-started/supported-agents.md | 26 ++++++-- docs/guide/index.md | 58 +++++++++++++---- docs/guide/what-rtk-covers.md | 20 ++++++ src/filters/README.md | 55 ++++++++++++++++ 11 files changed, 328 insertions(+), 118 deletions(-) create mode 100644 .github/docs-pipeline-contract.md delete mode 100644 docs/README.md create mode 100644 docs/guide/analytics/discover.md rename docs/guide/{ => getting-started}/configuration.md (94%) diff --git a/.github/docs-pipeline-contract.md b/.github/docs-pipeline-contract.md new file mode 100644 index 000000000..f812912cd --- /dev/null +++ b/.github/docs-pipeline-contract.md @@ -0,0 +1,57 @@ +# RTK Documentation — Interface Contract + +This directory contains user-facing documentation for the RTK website. +It feeds `rtk-ai/rtk-website` via the `prepare-docs.mjs` pipeline. + +**Scope**: `docs/guide/` is website content only. 
Technical and contributor documentation +lives in the codebase (distributed, co-located pattern): +- `ARCHITECTURE.md` — System design, ADRs, filtering strategies +- `CONTRIBUTING.md` — Design philosophy, PR process, TOML vs Rust +- `SECURITY.md` — Vulnerability policy +- `src/*/README.md` — Per-module implementation docs +- `hooks/README.md` — Hook system and agent integrations + +## Structure + +``` +docs/ + README.md <- This file (interface contract — do not remove) + guide/ -> User-facing documentation (website "Guide" tab) + index.md + getting-started/ + installation.md + quick-start.md + supported-agents.md + what-rtk-covers.md + analytics/ + gain.md + configuration.md + troubleshooting.md +``` + +## Frontmatter (required on every .md) + +Every markdown file under `docs/guide/` must include: + +```yaml +--- +title: string # Page title (used in sidebar + search) +description: string # One-line summary for search results and SEO +sidebar: + order: number # Position within the sidebar group (1 = first) +--- +``` + +The `prepare-docs.mjs` pipeline validates this at build time and fails fast +if frontmatter is missing or malformed. + +## Conventions + +- **Filenames**: kebab-case, `.md` only +- **Subdirectories**: become sidebar groups in Starlight +- **Internal links**: relative (`./foo.md`, `../configuration.md`) +- **Diagrams**: Mermaid in fenced code blocks +- **Code samples**: always specify the language (`rust`, `toml`, `bash`) +- **Language**: English only +- **No `rtk ` syntax**: users never type `rtk` — hooks rewrite commands transparently. + Only `rtk gain`, `rtk init`, `rtk verify`, and `rtk proxy` appear as user-typed commands. diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 07d64eb46..000000000 --- a/docs/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# RTK Documentation — Interface Contract - -This directory is the source of truth for user-facing and contributor documentation. 
-It feeds the RTK website via the `prepare-docs.mjs` pipeline in `rtk-ai/rtk-website`. - -## Structure - -``` -docs/ - README.md <- This file (interface contract — do not remove) - guide/ -> User-facing documentation (Tab "Guide") - reference/ -> Contributor/technical documentation (Tab "Reference") - architecture/ -> Conceptual/visual documentation (Tab "Architecture") -``` - -## Frontmatter (required on every .md) - -Every markdown file under `docs/guide/`, `docs/reference/`, and `docs/architecture/` -must include this frontmatter block at the top: - -```yaml ---- -title: string # Page title (used in sidebar + search) -description: string # One-line summary for search results and SEO -sidebar: - order: number # Position within the sidebar group (1 = first) ---- -``` - -The `prepare-docs.mjs` pipeline validates this contract at build time and **fails fast** -with a clear error message if frontmatter is missing or malformed. - -## Conventions - -- **Filenames**: kebab-case, `.md` only (e.g., `getting-started.md`, `quick-start.md`) -- **Subdirectories**: become sidebar groups in Starlight (directory name = group label) -- **Internal links**: relative within the same tab (`./foo.md`, `./getting-started/installation.md`) -- **Cross-tab links**: full path from `docs/` root (`../../reference/internals/command-routing.md`) -- **Diagrams**: Mermaid in fenced code blocks (` ```mermaid `) -- **Code samples**: always specify the language (`rust`, `toml`, `bash`, `shell`) -- **Language**: English only - -## Tabs overview - -| Tab | Path | Audience | -|-----|------|----------| -| Guide | `docs/guide/` | End users installing and using RTK | -| Reference | `docs/reference/` | Contributors, maintainers, integrators | -| Architecture | `docs/architecture/` | Readers exploring design decisions and diagrams | - -## Root files (do not move or modify structure) - -The following files live at the repository root and are **not** managed by this pipeline. 
-They are the canonical source for GitHub display and remain unchanged. - -- `README.md` — Project overview -- `INSTALL.md` — Installation reference (full) -- `CONTRIBUTING.md` — Contribution guide -- `SECURITY.md` — Security policy -- `ARCHITECTURE.md` — Full architecture document -- `CHANGELOG.md` — Release history - -The guide files in `docs/` are derived, English-only, structured versions intended -for the website. They reference root files as source material but do not replace them. diff --git a/docs/guide/analytics/discover.md b/docs/guide/analytics/discover.md new file mode 100644 index 000000000..77d21cc65 --- /dev/null +++ b/docs/guide/analytics/discover.md @@ -0,0 +1,58 @@ +--- +title: Discover and Session +description: Find missed savings opportunities with rtk discover, and track RTK adoption with rtk session +sidebar: + order: 2 +--- + +# Discover and Session + +## rtk discover — find missed savings + +`rtk discover` analyzes your Claude Code command history to identify commands that ran without RTK filtering and calculates how many tokens you lost. + +```bash +rtk discover # analyze current project history +rtk discover --all # all projects +rtk discover --all --since 7 # last 7 days, all projects +``` + +**Example output:** + +``` +Missed savings analysis (last 7 days) +──────────────────────────────────── +Command Count Est. lost +cargo test 12 ~48,000 tokens +git log 8 ~12,000 tokens +pnpm list 3 ~6,000 tokens +──────────────────────────────────── +Total missed: 23 ~66,000 tokens + +Run `rtk init --global` to capture these automatically. +``` + +If commands appear in the missed list after installing RTK, it usually means the hook isn't active for that agent. See [Troubleshooting](../troubleshooting.md) — "Agent not using RTK". + +## rtk session — adoption tracking + +`rtk session` shows RTK adoption across recent Claude Code sessions: how many shell commands ran through RTK vs. raw. 
+ +```bash +rtk session +``` + +**Example output:** + +``` +Recent sessions (last 10) +───────────────────────────────────────────────────── +Session Total RTK Coverage +2026-04-06 14:32 (45 cmds) 45 43 95.6% +2026-04-05 09:14 (38 cmds) 38 38 100.0% +2026-04-04 16:50 (52 cmds) 52 49 94.2% +───────────────────────────────────────────────────── +Average coverage: 96.6% +``` + +Low coverage on a session usually means RTK was disabled (`RTK_DISABLED=1`) or the hook wasn't active for a specific subagent. diff --git a/docs/guide/analytics/gain.md b/docs/guide/analytics/gain.md index ef0000298..db2249d4b 100644 --- a/docs/guide/analytics/gain.md +++ b/docs/guide/analytics/gain.md @@ -92,12 +92,12 @@ Same columns as daily, aggregated by Sunday-Saturday week or calendar month. | Command | Typical savings | Mechanism | |---------|----------------|-----------| -| `rtk git status` | 77-93% | Compact stat format | -| `rtk eslint` | 84% | Group by rule | -| `rtk vitest run` | 94-99% | Show failures only | -| `rtk find` | 75% | Tree format | -| `rtk pnpm list` | 70-90% | Compact dependencies | -| `rtk grep` | 70% | Truncate + group | +| `git status` | 77-93% | Compact stat format | +| `eslint` | 84% | Group by rule | +| `vitest run` | 94-99% | Show failures only | +| `find` | 75% | Tree format | +| `pnpm list` | 70-90% | Compact dependencies | +| `grep` | 70% | Truncate + group | ## How token estimation works @@ -131,19 +131,63 @@ cp ~/.local/share/rtk/history.db ~/backups/rtk-history-$(date +%Y%m%d).db rm ~/.local/share/rtk/history.db # recreated on next command ``` +## Analysis workflows + +```bash +# Weekly progress: generate a CSV report every Monday +rtk gain --weekly --format csv > reports/week-$(date +%Y-%W).csv + +# Monthly budget review +rtk gain --monthly --format json | jq '.monthly[] | + {month, saved_tokens, quota_pct: (.saved_tokens / 6000000 * 100)}' + +# Cron: daily JSON snapshot for a dashboard +0 0 * * * rtk gain --all --format json > 
/var/www/dashboard/rtk-stats.json
+```
+
+**Python/pandas:**
+```python
+import pandas as pd
+import io, subprocess
+
+result = subprocess.run(['rtk', 'gain', '--all', '--format', 'csv'],
+                        capture_output=True, text=True)
+lines = result.stdout.split('\n')
+daily_start = lines.index('# Daily Data') + 2
+daily_end = lines.index('', daily_start)
+daily_df = pd.read_csv(io.StringIO('\n'.join(lines[daily_start:daily_end])))
+daily_df['date'] = pd.to_datetime(daily_df['date'])
+daily_df.plot(x='date', y='savings_pct', kind='line')
+```
+
+**GitHub Actions (weekly stats):**
+```yaml
+on:
+  schedule:
+    - cron: '0 0 * * 1'
+jobs:
+  stats:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: cargo install rtk
+      - run: rtk gain --weekly --format json > stats/week-$(date +%Y-%W).json
+      - run: git add stats/ && git commit -m "Weekly rtk stats" && git push
+```
+
 ## Troubleshooting
 
 **No data showing:**
 ```bash
 ls -lh ~/.local/share/rtk/history.db
 sqlite3 ~/.local/share/rtk/history.db "SELECT COUNT(*) FROM commands"
-rtk git status  # run a tracked command to generate data
+git status  # run any tracked command to generate data
 ```
 
 **Incorrect statistics:**
Token estimation is a heuristic.
For precise counts, use `tiktoken`: ```bash pip install tiktoken -rtk git status > output.txt +git status > output.txt python -c " import tiktoken enc = tiktoken.get_encoding('cl100k_base') diff --git a/docs/guide/configuration.md b/docs/guide/getting-started/configuration.md similarity index 94% rename from docs/guide/configuration.md rename to docs/guide/getting-started/configuration.md index 053eb351e..f2b1d5a2e 100644 --- a/docs/guide/configuration.md +++ b/docs/guide/getting-started/configuration.md @@ -2,7 +2,7 @@ title: Configuration description: Customize RTK behavior via config.toml, environment variables, and per-project filters sidebar: - order: 7 + order: 4 --- # Configuration @@ -112,4 +112,4 @@ enabled = false ## Per-project filters -Create `.rtk/filters.toml` in your project root to add custom filters or override built-ins. See [Using Filters](./filters/using-filters.md) for the TOML DSL reference. +Create `.rtk/filters.toml` in your project root to add custom filters or override built-ins. See [`src/filters/README.md`](https://github.com/rtk-ai/rtk/blob/master/src/filters/README.md) for the full TOML DSL reference. 
diff --git a/docs/guide/getting-started/installation.md b/docs/guide/getting-started/installation.md index e52c9df32..3f3eac286 100644 --- a/docs/guide/getting-started/installation.md +++ b/docs/guide/getting-started/installation.md @@ -71,3 +71,11 @@ For a global install that patches `settings.json` automatically: ```bash rtk init --global ``` + +## Uninstall + +```bash +rtk init -g --uninstall # remove hook, RTK.md, and settings.json entry +cargo uninstall rtk # remove binary (if installed via Cargo) +brew uninstall rtk # remove binary (if installed via Homebrew) +``` diff --git a/docs/guide/getting-started/quick-start.md b/docs/guide/getting-started/quick-start.md index 8f3b6ed42..af661ebff 100644 --- a/docs/guide/getting-started/quick-start.md +++ b/docs/guide/getting-started/quick-start.md @@ -32,34 +32,22 @@ cd /your/project && rtk init This installs the hook that automatically rewrites commands. Restart your AI assistant after this step. -## Step 2: Run your first RTK commands +## Step 2: Use your tools normally -You can use RTK directly or let the hook rewrite commands transparently. +Once the hook is installed, nothing changes in how you work. Your AI assistant runs commands as usual — the hook intercepts them transparently and rewrites them before execution. -```bash -# Git — compact status and log -rtk git status -rtk git log -10 - -# Rust — build and test with failures only -rtk cargo build -rtk cargo test +For example, when Claude Code runs `cargo test`, the hook rewrites it to `rtk cargo test` before it executes. The LLM receives filtered output with only the failures — not 500 lines of passing tests. You never see or type `rtk`. -# JavaScript — type errors grouped by file -rtk tsc -rtk vitest run -``` +Supported ecosystems: Git, Cargo/Rust, JavaScript (vitest, tsc, eslint, pnpm, Next.js, Prisma), Python, Go, Ruby, .NET, Docker/Kubernetes, GitHub CLI, and more. See [What RTK Optimizes](../what-rtk-covers.md) for the full list. 
## Step 3: Check your savings -After a few commands, see how much you saved: +After a few commands, see how much was saved: ```bash rtk gain ``` -Output: - ``` Total commands : 12 Input tokens : 45,230 @@ -67,22 +55,16 @@ Output tokens : 4,890 Saved : 40,340 (89.2%) ``` -## Step 4: Use the proxy for unsupported commands +## Step 4: Unsupported commands -Any command RTK doesn't know about runs through passthrough — the output is unchanged but usage is tracked: +Commands RTK doesn't recognize run through passthrough — output is unchanged, usage is tracked: ```bash rtk proxy make install ``` -## What the hook does - -Once installed, the hook intercepts every command your AI assistant runs and rewrites it transparently. You don't need to type `rtk` — the hook does it automatically. - -For example, when Claude Code executes `cargo test`, the hook rewrites it to `rtk cargo test` before it runs. The filtered output is what the LLM sees. - ## Next steps +- [What RTK Optimizes](../what-rtk-covers.md) — all supported commands and savings by ecosystem - [Supported agents](./supported-agents.md) — Claude Code, Cursor, Copilot, and more -- [Commands](../commands/git.md) — full reference for each ecosystem -- [Configuration](../configuration.md) — customize RTK behavior +- [Configuration](./configuration.md) — customize RTK behavior diff --git a/docs/guide/getting-started/supported-agents.md b/docs/guide/getting-started/supported-agents.md index 52e5b520c..dc45bf970 100644 --- a/docs/guide/getting-started/supported-agents.md +++ b/docs/guide/getting-started/supported-agents.md @@ -7,7 +7,7 @@ sidebar: # Supported Agents -RTK supports 9 AI coding agents across 3 integration tiers. +RTK supports 10 AI coding agents across 3 integration tiers. Mistral Vibe support is planned. ## How it works @@ -29,14 +29,16 @@ Agent runs "cargo test" | Agent | Integration tier | Can rewrite transparently? 
| |-------|-----------------|---------------------------| | Claude Code | Shell hook (`PreToolUse`) | Yes | -| VS Code Copilot Chat | Rust binary | Yes | -| GitHub Copilot CLI | Rust binary (deny-with-suggestion) | No (agent retries) | +| VS Code Copilot Chat | Shell hook (`PreToolUse`) | Yes | +| GitHub Copilot CLI | Shell hook (deny-with-suggestion) | No (agent retries) | | Cursor | Shell hook (`preToolUse`) | Yes | -| Gemini CLI | Rust binary | Yes | +| Gemini CLI | Rust binary (`BeforeTool`) | Yes | +| OpenCode | TypeScript plugin (`tool.execute.before`) | Yes | +| OpenClaw | TypeScript plugin (`before_tool_call`) | Yes | | Cline / Roo Code | Rules file (prompt-level) | N/A | | Windsurf | Rules file (prompt-level) | N/A | | Codex CLI | AGENTS.md instructions | N/A | -| OpenCode | TypeScript plugin | Yes | +| Mistral Vibe | Planned ([#800](https://github.com/rtk-ai/rtk/issues/800)) | Pending upstream | ## Installation by agent @@ -78,7 +80,15 @@ rtk init --global --gemini rtk init --global --opencode ``` -Restart OpenCode. The plugin uses the `tool.execute.before` event. +Creates `~/.config/opencode/plugins/rtk.ts`. Uses the `tool.execute.before` hook. + +### OpenClaw + +```bash +openclaw plugins install ./openclaw +``` + +Plugin in the `openclaw/` directory. Uses the `before_tool_call` hook, delegates to `rtk rewrite`. ### Cline / Roo Code @@ -100,6 +110,10 @@ rtk init --windsurf # creates .windsurfrules in current project rtk init --codex # creates AGENTS.md or patches existing one ``` +### Mistral Vibe (planned) + +Support is blocked on upstream `BeforeToolCallback` ([mistral-vibe#531](https://github.com/mistralai/mistral-vibe/issues/531)). Tracked in [#800](https://github.com/rtk-ai/rtk/issues/800). 
+ ## Integration tiers explained | Tier | Mechanism | How rewrites work | diff --git a/docs/guide/index.md b/docs/guide/index.md index 4ccbf78a0..2b9a09b47 100644 --- a/docs/guide/index.md +++ b/docs/guide/index.md @@ -1,19 +1,55 @@ --- -title: RTK Guide -description: User-facing documentation for RTK, the token-saving CLI proxy for AI coding assistants +title: RTK Documentation +description: RTK (Rust Token Killer) — reduce LLM token consumption by 60-90% on common dev commands, with zero workflow changes sidebar: order: 1 --- -# RTK Guide +# RTK — Rust Token Killer -RTK (Rust Token Killer) is a CLI proxy that reduces LLM token consumption by 60-90% on common development operations. It filters and compresses command output before it reaches your AI assistant, without changing how you work. +RTK is a CLI proxy that sits between your AI assistant and your development tools. It filters command output before it reaches the LLM, keeping only what matters and discarding boilerplate, progress bars, and noise. -## What's in this guide +**Result:** 60-90% fewer tokens consumed per command, without changing how you work. You run `git status` as usual — RTK's hook intercepts it, filters the output, and the LLM sees a compact 3-line summary instead of 40 lines. 
-- **[Getting Started](./getting-started/installation.md)** — Install RTK, verify it works, run your first command -- **Commands** — Per-ecosystem reference: git, cargo, GitHub CLI, JavaScript, Python, and more -- **[Filters](./filters/using-filters.md)** — Create custom TOML filters for your own commands -- **[Analytics](./analytics/gain.md)** — Measure your actual token savings with `rtk gain` -- **[Configuration](./configuration.md)** — Customize RTK behavior via `~/.config/rtk/config.toml` -- **[Troubleshooting](./troubleshooting.md)** — Common issues and how to fix them +## How it works + +``` +Your AI assistant runs: git status + ↓ + Hook intercepts (PreToolUse) + ↓ + rtk git status (transparent rewrite) + ↓ + Raw output: 40 lines → Filtered: 3 lines + ~800 tokens → ~60 tokens (92% saved) + ↓ + LLM sees the compact output +``` + +Zero config changes to your workflow. The hook handles everything automatically. + +## What RTK optimizes + +60+ commands across 9 ecosystems — Git, Cargo/Rust, JavaScript, Python, Go, Ruby, .NET, Docker/Kubernetes, and more. See [What RTK Optimizes](./what-rtk-covers.md) for the full list with savings percentages. + +## Get started + +1. **[Installation](./getting-started/installation.md)** — Install RTK and verify you have the right package +2. **[Quick Start](./getting-started/quick-start.md)** — Connect to your AI assistant in 5 minutes +3. **[Supported Agents](./getting-started/supported-agents.md)** — Claude Code, Cursor, Copilot, Gemini, and 7 more + +## Measure your savings + +```bash +rtk gain # total savings across all sessions +rtk gain --daily # day-by-day breakdown +rtk gain --weekly # weekly aggregation +``` + +See [Analytics](./analytics/gain.md) for export formats and analysis workflows. 
+ +## Further reading + +- [Configuration](./getting-started/configuration.md) — config.toml, global flags, env vars, tee recovery +- [Troubleshooting](./troubleshooting.md) — common issues and fixes +- [ARCHITECTURE.md](https://github.com/rtk-ai/rtk/blob/master/ARCHITECTURE.md) — system design for contributors diff --git a/docs/guide/what-rtk-covers.md b/docs/guide/what-rtk-covers.md index 3e3f2a46a..426e91a97 100644 --- a/docs/guide/what-rtk-covers.md +++ b/docs/guide/what-rtk-covers.md @@ -113,6 +113,8 @@ Once RTK is installed with a hook, these commands are automatically intercepted | `grep` | 70% | Truncated lines, grouped by file | | `diff` | 65% | Context reduced | | `wc` | 60% | Compact counts | +| `cat` / `head` / `tail <file>` | 60-80% | Smart file reading via `rtk read` | +| `rtk smart <file>` | 85% | 2-line heuristic code summary (signatures only) | ## Cloud and Data @@ -122,6 +124,24 @@ Once RTK is installed with a hook, these commands are automatically intercepted | `psql` | 65% | Query results without decoration | | `curl` | 60% | Response body only, headers stripped | +## Global flags + +These flags apply to all RTK commands and can push savings even higher: + +| Flag | Description | +|------|-------------| +| `-u` / `--ultra-compact` | ASCII icons, inline format — extra token reduction on top of normal filtering | +| `-v` / `--verbose` | Show filtering details on stderr (`-v`, `-vv`, `-vvv` for increasing detail) | + +```bash +# Ultra-compact: even smaller output +git log # → already filtered by RTK +git log -u # → ultra-compact variant (if using rtk directly) + +# Debug: see what RTK is doing +RTK_DISABLED=0 git status -vvv +``` + ## Commands that are not rewritten If a command isn't in the list above, RTK runs it through passthrough — the output reaches the LLM unchanged.
You can explicitly track unsupported commands: diff --git a/src/filters/README.md b/src/filters/README.md index fbd4c4cb0..702f5034f 100644 --- a/src/filters/README.md +++ b/src/filters/README.md @@ -64,3 +64,58 @@ expected = "expected filtered output" Use the command name as the filename: `terraform-plan.toml`, `docker-inspect.toml`, `mix-compile.toml`. For commands with subcommands, prefer `<tool>-<subcommand>.toml` over grouping multiple filters in one file. + +## Build and runtime pipeline + +How a `.toml` file goes from contributor → binary → filtered output. + +```mermaid +flowchart TD + A[["src/filters/my-tool.toml\n(new file)"]] --> B + + subgraph BUILD ["cargo build"] + B["build.rs\n1. ls src/filters/*.toml\n2. sort alphabetically\n3. concat → BUILTIN_TOML"] --> C + C{"TOML valid?\nDuplicate names?"} -->|"fail"| D[["Build fails\nerror points to bad file"]] + C -->|"ok"| E[["OUT_DIR/builtin_filters.toml\n(generated)"]] + E --> F["rustc embeds via include_str!"] + F --> G[["rtk binary\nBUILTIN_TOML embedded"]] + end + + subgraph TESTS ["cargo test"] + H["test_builtin_filter_count\nassert_eq!(filters.len(), N)"] -->|"wrong count"| I[["FAIL"]] + J["test_builtin_all_filters_present\nassert!(names.contains('my-tool'))"] -->|"name missing"| K[["FAIL"]] + L["test_builtin_all_filters_have_inline_tests\nassert!(tested.contains(name))"] -->|"no tests"| M[["FAIL"]] + end + + subgraph RUNTIME ["rtk my-tool args"] + R["TomlFilterRegistry::load()\n1. .rtk/filters.toml\n2. ~/.config/rtk/filters.toml\n3. BUILTIN_TOML\n4. passthrough"] --> S + S{"match_command\nmatches?"} -->|"no match"| T[["exec raw (passthrough)"]] + S -->|"match"| U["exec command\ncapture stdout"] + U --> V["8-stage pipeline\nstrip_ansi → replace → match_output\n→ strip/keep_lines → truncate\n→ tail_lines → max_lines → on_empty"] + V --> W[["print filtered output + exit code"]] + end + + G --> H & J & L & R +``` + +## Filter lookup priority + +```mermaid +flowchart LR + CMD["rtk my-tool args"] --> P1 + P1{"1. 
.rtk/filters.toml\n(project-local)"} + P1 -->|"match"| WIN["apply filter"] + P1 -->|"no match"| P2 + P2{"2. ~/.config/rtk/filters.toml\n(user-global)"} + P2 -->|"match"| WIN + P2 -->|"no match"| P3 + P3{"3. BUILTIN_TOML\n(binary)"} + P3 -->|"match"| WIN + P3 -->|"no match"| P4[["exec raw (passthrough)"]] +``` + +First match wins. A project filter with the same name as a built-in shadows the built-in and triggers a warning: + +``` +[rtk] warning: filter 'make' is shadowing a built-in filter +``` From 76f3b24074fe87b8209b68713cb5a0a7d0b2ed98 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 6 Apr 2026 10:54:48 +0000 Subject: [PATCH 088/204] chore(master): release 0.35.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 39 +++++++++++++++++++++++++++++++++++ Cargo.lock | 2 +- Cargo.toml | 2 +- 4 files changed, 42 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b9091c583..3a39fd8cf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.34.3" + ".": "0.35.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 98628683e..8f6bf6e54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,45 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.35.0](https://github.com/rtk-ai/rtk/compare/v0.34.3...v0.35.0) (2026-04-06) + + +### Features + +* **aws:** expand CLI filters from 8 to 25 subcommands ([402c48e](https://github.com/rtk-ai/rtk/commit/402c48e66988e638a5b4f4dd193238fc1d0fe18f)) + + +### Bug Fixes + +* **cmd:** read/cat multiple file and consistent behavior ([3f58018](https://github.com/rtk-ai/rtk/commit/3f58018f4af1d7206457929cf80bb4534203c3ee)) +* **docs:** clean some docs + disclaimer ([deda44f](https://github.com/rtk-ai/rtk/commit/deda44f73607981f3d27ecc6341ce927aab34d37)) +* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([8465ca9](https://github.com/rtk-ai/rtk/commit/8465ca953fa9d70dcc971a941c19465d456eb7d4)) +* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([e1f2845](https://github.com/rtk-ai/rtk/commit/e1f2845df06a8d8b8325945dc4940ec5f530e4cc)) +* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([eefeae4](https://github.com/rtk-ai/rtk/commit/eefeae45656ff2607c3f519c8eae235e3f0fe411)) +* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([6cee6c6](https://github.com/rtk-ai/rtk/commit/6cee6c60b80f914ed9505e3925d85cadec43ab97)) +* **git:** preserve full diff hunk headers ([62f4452](https://github.com/rtk-ai/rtk/commit/62f445227679f3df293fe35e9b18cc5ab39d7963)) +* **git:** preserve full diff hunk headers ([09b3ff9](https://github.com/rtk-ai/rtk/commit/09b3ff9424e055f5fe25e535e5b60e077f8344f9)) +* **go:** avoid false build errors from download logs ([9c1cf2f](https://github.com/rtk-ai/rtk/commit/9c1cf2f403534fa7874638b1b983c2d7f918a185)) +* **go:** avoid false build errors from download logs ([d44fd3e](https://github.com/rtk-ai/rtk/commit/d44fd3e034208e3bcd59c2c46f7720eec4f10c98)) +* **go:** cover more build failure shapes 
([2425ad6](https://github.com/rtk-ai/rtk/commit/2425ad68e5386d19e5ec9ff1ca151a6d2c9a56d3)) +* **go:** preserve failing test location context ([1481bc5](https://github.com/rtk-ai/rtk/commit/1481bc590924031456a6022510275c29c09e330e)) +* **go:** preserve failing test location context ([374fe64](https://github.com/rtk-ai/rtk/commit/374fe64cfbedcd676733973e81a63a6dfecbb1b7)) +* **go:** restore build error coverage ([1177c9c](https://github.com/rtk-ai/rtk/commit/1177c9c873ac63b6c0bcc9e1b664a705baa0ad7a)) +* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([7217562](https://github.com/rtk-ai/rtk/commit/72175623551f40b581b4a7f6ed966c1e4a9c7358)) +* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([09979cf](https://github.com/rtk-ai/rtk/commit/09979cf29701a1b775bcac761d24ec0e055d1bec)) +* **hook_check:** detect missing integrations ([9cf9ccc](https://github.com/rtk-ai/rtk/commit/9cf9ccc1ac39f8bba37e932c7d318a3aa7a34ae9)) +* **init:** remove opt-out instruction from telemetry message ([7571c8e](https://github.com/rtk-ai/rtk/commit/7571c8e101c41ee64c51e2bd64697f85f9142423)) +* **init:** remove telemetry info lines from init output ([7dbef2c](https://github.com/rtk-ai/rtk/commit/7dbef2ce00824d26f2057e4c3c76e429e2e23088)) +* **main:** kill zombie processes + path for rtk md ([d16fc6d](https://github.com/rtk-ai/rtk/commit/d16fc6dacbfec912c21522939b15b7bbd9719487)) +* **main:** kill zombie processes + path for rtk md + missing intergrations ([a919335](https://github.com/rtk-ai/rtk/commit/a919335519ed4a5259a212e56407cb312aa99bac)) +* **merge:** changelog conflicts ([d92c5d2](https://github.com/rtk-ai/rtk/commit/d92c5d264a49483c8d6079e04d946a79bc990a74)) +* **proxy:** kill child process on SIGINT/SIGTERM to prevent orphans ([d813919](https://github.com/rtk-ai/rtk/commit/d813919a24546e044e7844fc7ed05fef4ec24033)) +* **proxy:** kill child process on SIGINT/SIGTERM to 
prevent orphans ([3318510](https://github.com/rtk-ai/rtk/commit/33185101fc122d0c11a25a4e02ac9f3a7dc7e3bb)) +* **review:** address ChildGuard disarm, stdin dedup, hook masking ([d85fe33](https://github.com/rtk-ai/rtk/commit/d85fe3384b87c16fafd25ec7bcadbff6e69f3f1f)) +* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([158c745](https://github.com/rtk-ai/rtk/commit/158c74527f6591d372e40a78cd604d73a20649a9)) +* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([41a6c6b](https://github.com/rtk-ai/rtk/commit/41a6c6bf6da78a4754794fdc6a1469df2e327920)) +* **tracking:** use std::env::temp_dir() for compatibility (instead of unix tmp) ([e918661](https://github.com/rtk-ai/rtk/commit/e918661440d7b50321f0535032f52c5e87aaf3cb)) + ## [Unreleased] ### Features diff --git a/Cargo.lock b/Cargo.lock index e1309d2eb..64aa3e9dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -903,7 +903,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.34.3" +version = "0.35.0" dependencies = [ "anyhow", "automod", diff --git a/Cargo.toml b/Cargo.toml index 68ed2aaf6..69beeaa53 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.34.3" +version = "0.35.0" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From 6938a9dacc00670fd61ec44a0fc2264974709e0e Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Mon, 6 Apr 2026 18:37:05 +0200 Subject: [PATCH 089/204] docs: fix broken README links and reduce redundancy, point to docs site - Fix broken links: docs/TROUBLESHOOTING.md and docs/AUDIT_GUIDE.md were removed in previous commit; update nav header and Documentation section - Condense Supported AI Tools: keep the summary table, replace 12 per-agent detail blocks with a single link to rtk-ai.app/guide/supported-agents - Condense 
Configuration: keep essential snippet and tee recovery example, link to rtk-ai.app/guide/configuration for full reference - Update Documentation section: lead with rtk-ai.app/guide as primary entry, keep INSTALL.md, ARCHITECTURE.md, CONTRIBUTING.md, SECURITY.md Signed-off-by: Florian BRUNIAUX --- README.md | 145 +++++------------------------------------------------- 1 file changed, 13 insertions(+), 132 deletions(-) diff --git a/README.md b/README.md index 24a93f364..128bbaa40 100644 --- a/README.md +++ b/README.md @@ -17,8 +17,8 @@

WebsiteInstall • - Troubleshooting • - Architecture • + Troubleshooting • + ArchitectureDiscord

@@ -313,153 +313,32 @@ RTK supports 10 AI coding tools. Each integration transparently rewrites shell c | Tool | Install | Method | |------|---------|--------| | **Claude Code** | `rtk init -g` | PreToolUse hook (bash) | -| **GitHub Copilot (VS Code)** | `rtk init -g --copilot` | PreToolUse hook (`rtk hook copilot`) — transparent rewrite | +| **GitHub Copilot (VS Code)** | `rtk init -g --copilot` | PreToolUse hook — transparent rewrite | | **GitHub Copilot CLI** | `rtk init -g --copilot` | PreToolUse deny-with-suggestion (CLI limitation) | | **Cursor** | `rtk init -g --agent cursor` | preToolUse hook (hooks.json) | -| **Gemini CLI** | `rtk init -g --gemini` | BeforeTool hook (`rtk hook gemini`) | +| **Gemini CLI** | `rtk init -g --gemini` | BeforeTool hook | | **Codex** | `rtk init -g --codex` | AGENTS.md + RTK.md instructions | | **Windsurf** | `rtk init --agent windsurf` | .windsurfrules (project-scoped) | | **Cline / Roo Code** | `rtk init --agent cline` | .clinerules (project-scoped) | | **OpenCode** | `rtk init -g --opencode` | Plugin TS (tool.execute.before) | | **OpenClaw** | `openclaw plugins install ./openclaw` | Plugin TS (before_tool_call) | -| **Mistral Vibe** | Planned (#800) | Blocked on upstream BeforeToolCallback | +| **Mistral Vibe** | Planned ([#800](https://github.com/rtk-ai/rtk/issues/800)) | Blocked on upstream | -### Claude Code (default) - -```bash -rtk init -g # Install hook + RTK.md -rtk init -g --auto-patch # Non-interactive (CI/CD) -rtk init --show # Verify installation -rtk init -g --uninstall # Remove -``` - -### GitHub Copilot (VS Code + CLI) - -```bash -rtk init -g --copilot # Install hook + instructions -``` - -Creates `.github/hooks/rtk-rewrite.json` (PreToolUse hook) and `.github/copilot-instructions.md` (prompt-level awareness). 
- -The hook (`rtk hook copilot`) auto-detects the format: -- **VS Code Copilot Chat**: transparent rewrite via `updatedInput` (same as Claude Code) -- **Copilot CLI**: deny-with-suggestion (CLI does not support `updatedInput` yet — see [copilot-cli#2013](https://github.com/github/copilot-cli/issues/2013)) - -### Cursor - -```bash -rtk init -g --agent cursor -``` - -Creates `~/.cursor/hooks/rtk-rewrite.sh` + patches `~/.cursor/hooks.json` with preToolUse matcher. Works with both Cursor editor and `cursor-agent` CLI. - -### Gemini CLI - -```bash -rtk init -g --gemini -rtk init -g --gemini --uninstall -``` - -Creates `~/.gemini/hooks/rtk-hook-gemini.sh` + patches `~/.gemini/settings.json` with BeforeTool hook. - -### Codex (OpenAI) - -```bash -rtk init -g --codex -``` - -Creates `~/.codex/RTK.md` + `~/.codex/AGENTS.md` with `@RTK.md` reference. Codex reads these as global instructions. - -### Windsurf - -```bash -rtk init --agent windsurf -``` - -Creates `.windsurfrules` in the current project. Cascade reads rules and prefixes commands with `rtk`. - -### Cline / Roo Code - -```bash -rtk init --agent cline -``` - -Creates `.clinerules` in the current project. Cline reads rules and prefixes commands with `rtk`. - -### OpenCode - -```bash -rtk init -g --opencode -``` - -Creates `~/.config/opencode/plugins/rtk.ts`. Uses `tool.execute.before` hook. - -### OpenClaw - -```bash -openclaw plugins install ./openclaw -``` - -Plugin in `openclaw/` directory. Uses `before_tool_call` hook, delegates to `rtk rewrite`. - -### Mistral Vibe (planned) - -Blocked on upstream BeforeToolCallback support ([mistral-vibe#531](https://github.com/mistralai/mistral-vibe/issues/531), [PR #533](https://github.com/mistralai/mistral-vibe/pull/533)). Tracked in [#800](https://github.com/rtk-ai/rtk/issues/800). 
- -### Commands Rewritten - -| Raw Command | Rewritten To | -|-------------|-------------| -| `git status/diff/log/add/commit/push/pull` | `rtk git ...` | -| `gh pr/issue/run` | `rtk gh ...` | -| `cargo test/build/clippy` | `rtk cargo ...` | -| `cat/head/tail ` | `rtk read ` | -| `rg/grep ` | `rtk grep ` | -| `ls` | `rtk ls` | -| `vitest/jest` | `rtk vitest run` | -| `tsc` | `rtk tsc` | -| `eslint/biome` | `rtk lint` | -| `prettier` | `rtk prettier` | -| `playwright` | `rtk playwright` | -| `prisma` | `rtk prisma` | -| `ruff check/format` | `rtk ruff ...` | -| `pytest` | `rtk pytest` | -| `pip list/install` | `rtk pip ...` | -| `go test/build/vet` | `rtk go ...` | -| `golangci-lint` | `rtk golangci-lint` | -| `rake test` / `rails test` | `rtk rake test` | -| `rspec` / `bundle exec rspec` | `rtk rspec` | -| `rubocop` / `bundle exec rubocop` | `rtk rubocop` | -| `bundle install/update` | `rtk bundle ...` | -| `aws sts/ec2/lambda/...` | `rtk aws ...` | -| `docker ps/images/logs` | `rtk docker ...` | -| `kubectl get/logs` | `rtk kubectl ...` | -| `curl` | `rtk curl` | -| `pnpm list/outdated` | `rtk pnpm ...` | - -Commands already using `rtk`, heredocs (`<<`), and unrecognized commands pass through unchanged. +For per-agent setup details, override controls, and graceful degradation, see the [Supported Agents guide](https://www.rtk-ai.app/guide/getting-started/supported-agents). 
## Configuration -### Config File - `~/.config/rtk/config.toml` (macOS: `~/Library/Application Support/rtk/config.toml`): ```toml -[tracking] -database_path = "/path/to/custom.db" # default: ~/.local/share/rtk/history.db - [hooks] exclude_commands = ["curl", "playwright"] # skip rewrite for these [tee] enabled = true # save raw output on failure (default: true) mode = "failures" # "failures", "always", or "never" -max_files = 20 # rotation limit ``` -### Tee: Full Output Recovery - When a command fails, RTK saves the full unfiltered output so the LLM can read it without re-executing: ``` @@ -467,6 +346,8 @@ FAILED: 2/15 tests [full output: ~/.local/share/rtk/tee/1707753600_cargo_test.log] ``` +For the full config reference (all sections, env vars, per-project filters), see the [Configuration guide](https://www.rtk-ai.app/guide/getting-started/configuration). + ### Uninstall ```bash @@ -477,11 +358,11 @@ brew uninstall rtk # If installed via Homebrew ## Documentation -- **[TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)** - Fix common issues -- **[INSTALL.md](INSTALL.md)** - Detailed installation guide -- **[ARCHITECTURE.md](docs/contributing/ARCHITECTURE.md)** - Technical architecture -- **[SECURITY.md](SECURITY.md)** - Security policy and PR review process -- **[AUDIT_GUIDE.md](docs/AUDIT_GUIDE.md)** - Token savings analytics guide +- **[rtk-ai.app/guide](https://www.rtk-ai.app/guide)** — full user guide (installation, supported agents, what gets optimized, analytics, configuration, troubleshooting) +- **[INSTALL.md](INSTALL.md)** — detailed installation reference +- **[ARCHITECTURE.md](ARCHITECTURE.md)** — system design and technical decisions +- **[CONTRIBUTING.md](CONTRIBUTING.md)** — contribution guide +- **[SECURITY.md](SECURITY.md)** — security policy ## Privacy & Telemetry From cea6e2093d5f87bd21cc0efb2c685183955bf36b Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Mon, 6 Apr 2026 18:48:43 +0200 Subject: [PATCH 090/204] docs: add Star History and 
StarMapper to README Signed-off-by: Florian BRUNIAUX --- README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/README.md b/README.md index 128bbaa40..78116b252 100644 --- a/README.md +++ b/README.md @@ -386,6 +386,26 @@ export RTK_TELEMETRY_DISABLED=1 enabled = false ``` +## Star History + + + + + + Star History Chart + + + +## StarMapper + + + + + + StarMapper + + + ## Contributing Contributions welcome! Please open an issue or PR on [GitHub](https://github.com/rtk-ai/rtk). From 47383e80197fc56e38f880f33a6b54261b82523c Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Tue, 7 Apr 2026 09:50:27 +0200 Subject: [PATCH 091/204] fix(security): correct email domain from .dev to .app MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #608 — security@rtk-ai.dev was bouncing, preventing vulnerability reports from reaching maintainers. Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak --- SECURITY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 2d06b77c3..0b7f3a9be 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -4,7 +4,7 @@ If you discover a security vulnerability in RTK, please report it to the maintainers privately: -- **Email**: security@rtk-ai.dev (or create a private security advisory on GitHub) +- **Email**: security@rtk-ai.app (or create a private security advisory on GitHub) - **Response time**: We aim to acknowledge reports within 48 hours - **Disclosure**: We follow responsible disclosure practices (90-day embargo) @@ -208,7 +208,7 @@ Critical vulnerabilities (remote code execution, data exfiltration) may be fast- ## Contact -- **Security issues**: security@rtk-ai.dev +- **Security issues**: security@rtk-ai.app - **General questions**: https://github.com/rtk-ai/rtk/discussions - **Maintainers**: @FlorianBruniaux (active fork maintainer) From da486bf394330c804cd1cd12e4b6835f18de5205 Mon Sep 17 00:00:00 2001 From: 
Patrick szymkowiak Date: Tue, 7 Apr 2026 15:42:15 +0200 Subject: [PATCH 092/204] fix(tee): prevent panic on UTF-8 multi-byte truncation boundary &raw[..max_file_size] panics if the byte offset falls inside a multi-byte UTF-8 character (e.g. Japanese, emoji). Now finds the nearest char boundary before slicing. Ref: issue #640 (L-3 finding) Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak --- src/core/tee.rs | 51 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/src/core/tee.rs b/src/core/tee.rs index 238f2e326..c67ea8349 100644 --- a/src/core/tee.rs +++ b/src/core/tee.rs @@ -121,11 +121,17 @@ fn write_tee_file( let filename = format!("{}_{}.log", epoch, slug); let filepath = tee_dir.join(filename); - // Truncate at max_file_size + // Truncate at max_file_size (find a safe UTF-8 char boundary) let content = if raw.len() > max_file_size { + let boundary = raw + .char_indices() + .take_while(|(i, _)| *i < max_file_size) + .last() + .map(|(i, c)| i + c.len_utf8()) + .unwrap_or(0); format!( "{}\n\n--- truncated at {} bytes ---", - &raw[..max_file_size], + &raw[..boundary], max_file_size ) } else { @@ -356,6 +362,47 @@ mod tests { assert!(content.len() < 2000); } + #[test] + fn test_write_tee_file_truncation_utf8_boundary() { + let tmpdir = tempfile::tempdir().unwrap(); + // Create a string where the truncation point falls inside a multi-byte char. + // Japanese chars are 3 bytes each in UTF-8. + // 332 chars * 3 bytes = 996 bytes, then one more = 999 bytes. + // With max_file_size=998, the cut falls mid-character. 
+ let japanese = "\u{6F22}".repeat(333); // 999 bytes of 3-byte chars + assert_eq!(japanese.len(), 999); + + // Truncate at 998 — falls in the middle of the 333rd character + let result = write_tee_file(&japanese, "test_utf8", tmpdir.path(), 998, 20); + assert!(result.is_some()); + + let path = result.unwrap(); + let content = fs::read_to_string(&path).unwrap(); + assert!(content.contains("--- truncated at 998 bytes ---")); + // Should contain 332 full characters (996 bytes), not panic + assert!(content.starts_with(&"\u{6F22}".repeat(332))); + } + + #[test] + fn test_write_tee_file_truncation_emoji() { + let tmpdir = tempfile::tempdir().unwrap(); + // Emoji are 4 bytes each in UTF-8 + let emojis = "\u{1F600}".repeat(100); // 400 bytes + assert_eq!(emojis.len(), 400); + + // Truncate at 201 — falls mid-emoji (4-byte boundary is at 200, 204) + let result = write_tee_file(&emojis, "test_emoji", tmpdir.path(), 201, 20); + assert!(result.is_some()); + + let path = result.unwrap(); + let content = fs::read_to_string(&path).unwrap(); + assert!(content.contains("--- truncated at 201 bytes ---")); + // The emoji portion should be exactly 200 bytes (50 emojis), + // rounded down from 201 to the nearest char boundary + let target = "\u{1F600}".repeat(50); + assert!(content.starts_with(&target)); + } + #[test] fn test_cleanup_old_files() { let tmpdir = tempfile::tempdir().unwrap(); From c85a387363e2079234b6141aad26418172c0e61a Mon Sep 17 00:00:00 2001 From: Trevin Chow Date: Tue, 31 Mar 2026 21:49:59 -0700 Subject: [PATCH 093/204] fix: report package-level failures (timeouts, signals) in go test summary When go test times out or is killed by a signal, the JSON stream contains a package-level {"action":"fail"} with no Test field and no FailedBuild field. The parser only handled test-level and build-level failures, so this event was silently dropped. The summary then saw 0 passes + 0 fails and reported "No tests found" instead of the actual failure. 
The fix adds handling for package-level failures: a new else branch in the fail handler, package-level output capture in the output handler, and inclusion in the summary count and display. Before: "Go test: No tests found" After: "Go test: 0 passed, 1 failed in 1 packages" with the timeout output shown under the package name. Fixes #958 Co-Authored-By: Claude Opus 4.6 (1M context) --- src/cmds/go/go_cmd.rs | 79 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/src/cmds/go/go_cmd.rs b/src/cmds/go/go_cmd.rs index 2734c26f8..e834eb6b4 100644 --- a/src/cmds/go/go_cmd.rs +++ b/src/cmds/go/go_cmd.rs @@ -38,6 +38,8 @@ struct PackageResult { build_failed: bool, build_errors: Vec, failed_tests: Vec<(String, Vec)>, // (test_name, output_lines) + package_failed: bool, // package-level failure (timeout, signal, etc.) + package_fail_output: Vec, // output lines collected before the package fail } pub fn run_test(args: &[String], verbose: u8) -> Result { @@ -350,6 +352,10 @@ fn filter_go_test_json(output: &str) -> String { pkg_result.build_errors = errors; } } + } else { + // Package-level failure without a specific test or build error + // (timeout, signal kill, panic before test execution, etc.) + pkg_result.package_failed = true; } } "skip" => { @@ -358,13 +364,21 @@ fn filter_go_test_json(output: &str) -> String { } } "output" => { - // Collect output for current test - if let (Some(test), Some(output_text)) = (&event.test, &event.output) { - let key = (package.clone(), test.clone()); - current_test_output - .entry(key) - .or_default() - .push(output_text.trim_end().to_string()); + if let Some(output_text) = &event.output { + if let Some(test) = &event.test { + // Collect output for current test + let key = (package.clone(), test.clone()); + current_test_output + .entry(key) + .or_default() + .push(output_text.trim_end().to_string()); + } else { + // Package-level output (timeout messages, signal info, etc.) 
+ let trimmed = output_text.trim(); + if !trimmed.is_empty() { + pkg_result.package_fail_output.push(trimmed.to_string()); + } + } } } _ => {} // run, pause, cont, etc. @@ -377,8 +391,9 @@ fn filter_go_test_json(output: &str) -> String { let total_fail: usize = packages.values().map(|p| p.fail).sum(); let total_skip: usize = packages.values().map(|p| p.skip).sum(); let total_build_fail: usize = packages.values().filter(|p| p.build_failed).count(); + let total_pkg_fail: usize = packages.values().filter(|p| p.package_failed).count(); - let has_failures = total_fail > 0 || total_build_fail > 0; + let has_failures = total_fail > 0 || total_build_fail > 0 || total_pkg_fail > 0; if !has_failures && total_pass == 0 { return "Go test: No tests found".to_string(); @@ -395,7 +410,7 @@ fn filter_go_test_json(output: &str) -> String { result.push_str(&format!( "Go test: {} passed, {} failed", total_pass, - total_fail + total_build_fail + total_fail + total_build_fail + total_pkg_fail )); if total_skip > 0 { result.push_str(&format!(", {} skipped", total_skip)); @@ -403,7 +418,23 @@ fn filter_go_test_json(output: &str) -> String { result.push_str(&format!(" in {} packages\n", total_packages)); result.push_str("═══════════════════════════════════════\n"); - // Show build failures first + // Show package-level failures first (timeouts, signals, panics) + for (package, pkg_result) in packages.iter() { + if !pkg_result.package_failed { + continue; + } + + result.push_str(&format!("\n{} [FAIL]\n", compact_package_name(package))); + + for line in &pkg_result.package_fail_output { + let trimmed = line.trim(); + if !trimmed.is_empty() { + result.push_str(&format!(" {}\n", truncate(trimmed, 120))); + } + } + } + + // Show build failures for (package, pkg_result) in packages.iter() { if !pkg_result.build_failed { continue; @@ -695,6 +726,34 @@ mod tests { assert!(result.contains("values differ after normalization")); } + #[test] + fn test_filter_go_test_timeout_package_fail() { + // When 
go test times out, the JSON stream has a package-level "fail" + // with no Test field and no FailedBuild field. This should be reported + // as a failure, not "No tests found". + let output = r#"{"Time":"2024-01-01T10:00:00Z","Action":"start","Package":"example.com/foo"} +{"Time":"2024-01-01T10:01:03Z","Action":"output","Package":"example.com/foo","Output":"*** Test killed with quit: ran too long (1m3s).\n"} +{"Time":"2024-01-01T10:01:03Z","Action":"output","Package":"example.com/foo","Output":"FAIL\texample.com/foo\t63.001s\n"} +{"Time":"2024-01-01T10:01:03Z","Action":"fail","Package":"example.com/foo","Elapsed":63.003}"#; + + let result = filter_go_test_json(output); + assert!( + result.contains("1 failed"), + "Expected '1 failed' in output, got: {}", + result + ); + assert!( + !result.contains("No tests found"), + "Should not say 'No tests found' on timeout, got: {}", + result + ); + assert!( + result.contains("FAIL"), + "Expected failure output in summary, got: {}", + result + ); + } + #[test] fn test_filter_go_build_success() { let output = ""; From 15f666dd8dbd18648cb7bd14a6f9f3cac2f7d10b Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Wed, 8 Apr 2026 11:10:29 +0200 Subject: [PATCH 094/204] =?UTF-8?q?fix(telemetry):=207=20bugs=20in=20enric?= =?UTF-8?q?hment=20=E2=80=94=20privacy=20leak,=20broken=20meta=5Fusage,=20?= =?UTF-8?q?pricing?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit P0 fixes: - top_passthrough: GROUP BY first word only, not full command args (privacy leak) - build_meta_usage: use dedicated count query instead of scanning deduplicated top list (was always returning 0 or 1) P1 fixes: - Single Tracker connection per ping (was opening 2 SQLite connections) - first_seen_days: propagate DB errors instead of unwrap_or(None) - ecosystem_mix: bound to 90 days instead of full table scan - avg_savings_per_command: compute per-command mean, not per-row mean - estimated_savings_usd_30d: use $3/Mtok (Sonnet 
input rate) not $5/Mtok Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak --- src/core/telemetry.rs | 91 +++++++++++++++++++++---------------------- src/core/tracking.rs | 45 +++++++++++++++------ 2 files changed, 77 insertions(+), 59 deletions(-) diff --git a/src/core/telemetry.rs b/src/core/telemetry.rs index d4bfefb86..524a0574b 100644 --- a/src/core/telemetry.rs +++ b/src/core/telemetry.rs @@ -61,10 +61,34 @@ fn send_ping() -> Result<(), Box> { let arch = std::env::consts::ARCH.to_string(); let install_method = detect_install_method(); - // Get stats from tracking DB + // Get stats from tracking DB (single connection for both basic + enriched) + let tracker = tracking::Tracker::new().ok(); let (commands_24h, top_commands, savings_pct, tokens_saved_24h, tokens_saved_total) = - get_stats(); - let enriched = get_enriched_stats(); + match &tracker { + Some(t) => get_stats(t), + None => (0, vec![], None, 0, 0), + }; + let enriched = match &tracker { + Some(t) => get_enriched_stats(t), + None => EnrichedStats { + passthrough_top: vec![], + parse_failures_24h: 0, + low_savings_commands: vec![], + avg_savings_per_command: 0.0, + hook_type: detect_hook_type(), + custom_toml_filters: count_custom_toml_filters(), + first_seen_days: 0, + active_days_30d: 0, + commands_total: 0, + ecosystem_mix: serde_json::json!({}), + tokens_saved_30d: 0, + estimated_savings_usd_30d: 0.0, + has_config_toml: detect_has_config(), + exclude_commands_count: count_exclude_commands(), + projects_count: 0, + meta_usage: serde_json::json!({}), + }, + }; let payload = serde_json::json!({ "device_hash": device_hash, @@ -183,23 +207,13 @@ fn salt_file_path() -> PathBuf { .join(".device_salt") } -fn get_stats() -> (i64, Vec, Option, i64, i64) { - let tracker = match tracking::Tracker::new() { - Ok(t) => t, - Err(_) => return (0, vec![], None, 0, 0), - }; - +fn get_stats(tracker: &tracking::Tracker) -> (i64, Vec, Option, i64, i64) { let since_24h = chrono::Utc::now() - 
chrono::Duration::hours(24); - // Get 24h command count and top commands from tracking DB let commands_24h = tracker.count_commands_since(since_24h).unwrap_or(0); - let top_commands = tracker.top_commands(5).unwrap_or_default(); - let savings_pct = tracker.overall_savings_pct().ok(); - let tokens_saved_24h = tracker.tokens_saved_24h(since_24h).unwrap_or(0); - let tokens_saved_total = tracker.total_tokens_saved().unwrap_or(0); ( @@ -237,31 +251,7 @@ struct EnrichedStats { meta_usage: serde_json::Value, } -fn get_enriched_stats() -> EnrichedStats { - let defaults = || EnrichedStats { - passthrough_top: vec![], - parse_failures_24h: 0, - low_savings_commands: vec![], - avg_savings_per_command: 0.0, - hook_type: detect_hook_type(), - custom_toml_filters: count_custom_toml_filters(), - first_seen_days: 0, - active_days_30d: 0, - commands_total: 0, - ecosystem_mix: serde_json::json!({}), - tokens_saved_30d: 0, - estimated_savings_usd_30d: 0.0, - has_config_toml: detect_has_config(), - exclude_commands_count: count_exclude_commands(), - projects_count: 0, - meta_usage: serde_json::json!({}), - }; - - let tracker = match tracking::Tracker::new() { - Ok(t) => t, - Err(_) => return defaults(), - }; - +fn get_enriched_stats(tracker: &tracking::Tracker) -> EnrichedStats { let since_24h = chrono::Utc::now() - chrono::Duration::hours(24); let passthrough_top = tracker @@ -296,9 +286,9 @@ fn get_enriched_stats() -> EnrichedStats { ); let tokens_saved_30d = tracker.tokens_saved_30d().unwrap_or(0); - // Estimate USD savings: Claude Sonnet input $3/Mtok, output $15/Mtok - // Weighted average ~$5/Mtok for typical input-heavy agent usage - let estimated_savings_usd_30d = tokens_saved_30d as f64 / 1_000_000.0 * 5.0; + // Estimate USD savings: tokens_saved are input tokens (CLI output compressed before + // reaching the LLM). Use input pricing: Claude Sonnet $3/Mtok. 
+ let estimated_savings_usd_30d = tokens_saved_30d as f64 / 1_000_000.0 * 3.0; let projects_count = tracker.projects_count().unwrap_or(0); @@ -324,13 +314,12 @@ fn get_enriched_stats() -> EnrichedStats { } } -/// Build meta-command usage counts (gain, discover, proxy, verify, learn). +/// Build meta-command usage counts (gain, discover, proxy, verify, learn, init). fn build_meta_usage(tracker: &tracking::Tracker) -> serde_json::Value { let meta_cmds = ["gain", "discover", "proxy", "verify", "learn", "init"]; - let top = tracker.top_commands(50).unwrap_or_default(); let mut usage = serde_json::Map::new(); for meta in &meta_cmds { - let count = top.iter().filter(|c| c == meta).count(); + let count = tracker.count_meta_command(meta).unwrap_or(0); if count > 0 { usage.insert(meta.to_string(), serde_json::json!(count)); } @@ -551,7 +540,11 @@ mod tests { #[test] fn test_get_stats_returns_tuple() { - let (cmds, top, pct, saved_24h, saved_total) = get_stats(); + let tracker = match tracking::Tracker::new() { + Ok(t) => t, + Err(_) => return, // No DB — skip + }; + let (cmds, top, pct, saved_24h, saved_total) = get_stats(&tracker); assert!(cmds >= 0); assert!(top.len() <= 5); assert!(saved_24h >= 0); @@ -563,7 +556,11 @@ mod tests { #[test] fn test_enriched_stats_returns_valid_data() { - let stats = get_enriched_stats(); + let tracker = match tracking::Tracker::new() { + Ok(t) => t, + Err(_) => return, + }; + let stats = get_enriched_stats(&tracker); assert!(stats.passthrough_top.len() <= 5); assert!(stats.parse_failures_24h >= 0); assert!(stats.low_savings_commands.len() <= 5); diff --git a/src/core/tracking.rs b/src/core/tracking.rs index ae3cfa596..e28db5cf6 100644 --- a/src/core/tracking.rs +++ b/src/core/tracking.rs @@ -958,17 +958,18 @@ impl Tracker { } /// Top N passthrough commands (0% savings) — commands missing a filter. + /// Groups by first word only to avoid leaking arguments into telemetry. 
pub fn top_passthrough(&self, limit: usize) -> Result> { let mut stmt = self.conn.prepare( - "SELECT original_cmd, COUNT(*) as cnt FROM commands + "SELECT TRIM(SUBSTR(original_cmd, 1, INSTR(original_cmd || ' ', ' ') - 1)) as tool, + COUNT(*) as cnt FROM commands WHERE input_tokens = 0 AND output_tokens = 0 - GROUP BY original_cmd ORDER BY cnt DESC LIMIT ?1", + GROUP BY tool ORDER BY cnt DESC LIMIT ?1", )?; let rows = stmt.query_map(params![limit as i64], |row| { let cmd: String = row.get(0)?; let count: i64 = row.get(1)?; - let short = cmd.split_whitespace().take(3).collect::>().join(" "); - Ok((short, count)) + Ok((cmd, count)) })?; Ok(rows.filter_map(|r| r.ok()).collect()) } @@ -1002,22 +1003,42 @@ impl Tracker { Ok(rows.filter_map(|r| r.ok()).collect()) } - /// Average savings percentage per command (unweighted by volume). + /// Average savings percentage per command (unweighted — each command name counts once). pub fn avg_savings_per_command(&self) -> Result { let avg: f64 = self.conn.query_row( - "SELECT COALESCE(AVG(savings_pct), 0.0) FROM commands WHERE input_tokens > 0", + "SELECT COALESCE(AVG(avg_sav), 0.0) FROM ( + SELECT rtk_cmd, AVG(savings_pct) as avg_sav + FROM commands WHERE input_tokens > 0 + GROUP BY rtk_cmd + )", [], |row| row.get(0), )?; Ok(avg) } + /// Count invocations of a specific meta-command (by rtk_cmd suffix). + pub fn count_meta_command(&self, name: &str) -> Result { + let pattern = format!("rtk {}", name); + let count: i64 = self.conn.query_row( + "SELECT COUNT(*) FROM commands WHERE rtk_cmd LIKE ?1 || '%'", + params![pattern], + |row| row.get(0), + )?; + Ok(count) + } + /// Days since first recorded command (installation age). 
pub fn first_seen_days(&self) -> Result { - let oldest: Option = self - .conn - .query_row("SELECT MIN(timestamp) FROM commands", [], |row| row.get(0)) - .unwrap_or(None); + let oldest: Option = + match self + .conn + .query_row("SELECT MIN(timestamp) FROM commands", [], |row| row.get(0)) + { + Ok(v) => v, + Err(rusqlite::Error::QueryReturnedNoRows) => None, + Err(e) => return Err(anyhow::anyhow!("Failed to query first seen timestamp: {e}")), + }; match oldest { Some(ts) => { let first = chrono::NaiveDateTime::parse_from_str(&ts, "%Y-%m-%dT%H:%M:%S") @@ -1055,7 +1076,7 @@ impl Tracker { /// Ecosystem distribution as percentages (top categories by command prefix). pub fn ecosystem_mix(&self) -> Result> { let total: f64 = self.conn.query_row( - "SELECT COUNT(*) FROM commands WHERE input_tokens > 0", + "SELECT COUNT(*) FROM commands WHERE input_tokens > 0 AND timestamp >= datetime('now', '-90 days')", [], |row| row.get(0), )?; @@ -1064,7 +1085,7 @@ impl Tracker { } let mut stmt = self.conn.prepare( "SELECT rtk_cmd, COUNT(*) as cnt FROM commands - WHERE input_tokens > 0 + WHERE input_tokens > 0 AND timestamp >= datetime('now', '-90 days') GROUP BY rtk_cmd ORDER BY cnt DESC", )?; let mut categories: std::collections::HashMap = From be5c0576d95566f37f266fd9f92e2a1b263697bd Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Wed, 8 Apr 2026 17:30:24 +0200 Subject: [PATCH 095/204] fix(docs): update TELEMETRY.md to match code after review fixes - passthrough_top example: "npm ci:8" -> "npm:8" (first word only) - estimated_savings_usd_30d: $5/Mtok -> $3/Mtok (input pricing) - Remove needless borrow in build_meta_usage (clippy) Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak --- docs/TELEMETRY.md | 4 ++-- src/core/telemetry.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/TELEMETRY.md b/docs/TELEMETRY.md index c364349a0..426cd0c74 100644 --- a/docs/TELEMETRY.md +++ b/docs/TELEMETRY.md @@ -55,7 +55,7 @@ This data directly 
drives our roadmap. For example, if telemetry shows that 40% | Field | Example | Purpose | |-------|---------|---------| -| `passthrough_top` | `["git tag:15", "npm ci:8"]` | Top 5 commands with 0% savings — these need filters | +| `passthrough_top` | `["git:15", "npm:8"]` | Top 5 commands with 0% savings — these need filters | | `parse_failures_24h` | `3` | Filter fragility — high count means filters are breaking | | `low_savings_commands` | `["rtk docker ps:25%"]` | Commands averaging <30% savings — filters to improve | | `avg_savings_per_command` | `68.5` | Unweighted average (vs global which is volume-biased) | @@ -78,7 +78,7 @@ This data directly drives our roadmap. For example, if telemetry shows that 40% | Field | Example | Purpose | |-------|---------|---------| | `tokens_saved_30d` | `12000000` | 30-day token savings for trend analysis | -| `estimated_savings_usd_30d` | `60.0` | Estimated dollar value saved (at ~$5/Mtok average API pricing) | +| `estimated_savings_usd_30d` | `36.0` | Estimated dollar value saved (at ~$3/Mtok input pricing, Claude Sonnet) | ### Adoption diff --git a/src/core/telemetry.rs b/src/core/telemetry.rs index 524a0574b..5d6139608 100644 --- a/src/core/telemetry.rs +++ b/src/core/telemetry.rs @@ -292,7 +292,7 @@ fn get_enriched_stats(tracker: &tracking::Tracker) -> EnrichedStats { let projects_count = tracker.projects_count().unwrap_or(0); - let meta_usage = build_meta_usage(&tracker); + let meta_usage = build_meta_usage(tracker); EnrichedStats { passthrough_top, From d0a3797ec580f96948489d1e7c3329ac22a6c4eb Mon Sep 17 00:00:00 2001 From: yosoyepa Date: Mon, 6 Apr 2026 00:43:14 -0500 Subject: [PATCH 096/204] feat(init): add native support for Kilo Code and Google Antigravity Add rtk init --agent kilocode and rtk init --agent antigravity commands. 
Kilo Code: installs .kilocode/rules/rtk-rules.md (project-scoped) Google Antigravity: installs .agents/rules/antigravity-rtk-rules.md (project-scoped) Both follow the same prompt-level guidance pattern as Cline and Windsurf, using rules files that instruct the agent to prefix shell commands with rtk. --- README.md | 6 +- .../guide/getting-started/supported-agents.md | 24 +++- hooks/antigravity/README.md | 9 ++ hooks/antigravity/rules.md | 32 +++++ hooks/kilocode/README.md | 9 ++ hooks/kilocode/rules.md | 32 +++++ src/hooks/init.rs | 130 ++++++++++++++++++ src/main.rs | 16 +++ 8 files changed, 254 insertions(+), 4 deletions(-) create mode 100644 hooks/antigravity/README.md create mode 100644 hooks/antigravity/rules.md create mode 100644 hooks/kilocode/README.md create mode 100644 hooks/kilocode/rules.md diff --git a/README.md b/README.md index 78116b252..d898b21d4 100644 --- a/README.md +++ b/README.md @@ -106,6 +106,8 @@ rtk init -g --codex # Codex (OpenAI) rtk init -g --agent cursor # Cursor rtk init --agent windsurf # Windsurf rtk init --agent cline # Cline / Roo Code +rtk init --agent kilocode # Kilo Code +rtk init --agent antigravity # Google Antigravity # 2. Restart your AI tool, then test git status # Automatically rewritten to rtk git status @@ -308,7 +310,7 @@ After install, **restart Claude Code**. ## Supported AI Tools -RTK supports 10 AI coding tools. Each integration transparently rewrites shell commands to `rtk` equivalents for 60-90% token savings. +RTK supports 12 AI coding tools. Each integration transparently rewrites shell commands to `rtk` equivalents for 60-90% token savings. | Tool | Install | Method | |------|---------|--------| @@ -323,6 +325,8 @@ RTK supports 10 AI coding tools. 
Each integration transparently rewrites shell c | **OpenCode** | `rtk init -g --opencode` | Plugin TS (tool.execute.before) | | **OpenClaw** | `openclaw plugins install ./openclaw` | Plugin TS (before_tool_call) | | **Mistral Vibe** | Planned ([#800](https://github.com/rtk-ai/rtk/issues/800)) | Blocked on upstream | +| **Kilo Code** | `rtk init --agent kilocode` | .kilocode/rules/rtk-rules.md (project-scoped) | +| **Google Antigravity** | `rtk init --agent antigravity` | .agents/rules/antigravity-rtk-rules.md (project-scoped) | For per-agent setup details, override controls, and graceful degradation, see the [Supported Agents guide](https://www.rtk-ai.app/guide/getting-started/supported-agents). diff --git a/docs/guide/getting-started/supported-agents.md b/docs/guide/getting-started/supported-agents.md index dc45bf970..233084652 100644 --- a/docs/guide/getting-started/supported-agents.md +++ b/docs/guide/getting-started/supported-agents.md @@ -1,13 +1,13 @@ --- title: Supported Agents -description: How to integrate RTK with Claude Code, Cursor, Copilot, Cline, Windsurf, Codex, and OpenCode +description: How to integrate RTK with Claude Code, Cursor, Copilot, Cline, Windsurf, Codex, OpenCode, Kilo Code, and Antigravity sidebar: order: 3 --- # Supported Agents -RTK supports 10 AI coding agents across 3 integration tiers. Mistral Vibe support is planned. +RTK supports 12 AI coding agents across 3 integration tiers. Mistral Vibe support is planned. 
## How it works @@ -38,6 +38,8 @@ Agent runs "cargo test" | Cline / Roo Code | Rules file (prompt-level) | N/A | | Windsurf | Rules file (prompt-level) | N/A | | Codex CLI | AGENTS.md instructions | N/A | +| Kilo Code | Rules file (prompt-level) | N/A | +| Google Antigravity | Rules file (prompt-level) | N/A | | Mistral Vibe | Planned ([#800](https://github.com/rtk-ai/rtk/issues/800)) | Pending upstream | ## Installation by agent @@ -110,6 +112,22 @@ rtk init --windsurf # creates .windsurfrules in current project rtk init --codex # creates AGENTS.md or patches existing one ``` +### Kilo Code + +```bash +rtk init --agent kilocode # creates .kilocode/rules/rtk-rules.md in current project +``` + +Kilo Code reads `.kilocode/rules/` as custom instructions. RTK adds guidance telling Kilo Code to prefer `rtk ` over raw commands. + +### Google Antigravity + +```bash +rtk init --agent antigravity # creates .agents/rules/antigravity-rtk-rules.md in current project +``` + +Antigravity reads `.agents/rules/` as custom instructions. RTK adds guidance telling Antigravity to prefer `rtk ` over raw commands. + ### Mistral Vibe (planned) Support is blocked on upstream `BeforeToolCallback` ([mistral-vibe#531](https://github.com/mistralai/mistral-vibe/issues/531)). Tracked in [#800](https://github.com/rtk-ai/rtk/issues/800). @@ -122,7 +140,7 @@ Support is blocked on upstream `BeforeToolCallback` ([mistral-vibe#531](https:// | **Plugin** | TypeScript/JS in agent's plugin system | Transparent — in-place mutation | | **Rules file** | Prompt-level instructions | Guidance only — agent is told to prefer `rtk ` | -Rules file integrations (Cline, Windsurf, Codex) rely on the model following instructions. Full hook integrations (Claude Code, Cursor, Gemini) are guaranteed — the command is rewritten before the agent sees it. +Rules file integrations (Cline, Windsurf, Codex, Kilo Code, Antigravity) rely on the model following instructions. 
Full hook integrations (Claude Code, Cursor, Gemini) are guaranteed — the command is rewritten before the agent sees it. ## Graceful degradation diff --git a/hooks/antigravity/README.md b/hooks/antigravity/README.md new file mode 100644 index 000000000..0b279254b --- /dev/null +++ b/hooks/antigravity/README.md @@ -0,0 +1,9 @@ +# Google Antigravity Hooks + +> Part of [`hooks/`](../README.md) — see also [`src/hooks/`](../../src/hooks/README.md) for installation code + +## Specifics + +- Prompt-level guidance only (no programmatic hook) -- relies on Antigravity reading custom instructions +- `rules.md` contains the instruction to prefix all shell commands with `rtk`, usage examples, and meta commands +- Installed to `.agents/rules/antigravity-rtk-rules.md` (project-local) by `rtk init --agent antigravity` diff --git a/hooks/antigravity/rules.md b/hooks/antigravity/rules.md new file mode 100644 index 000000000..8ee44bcd4 --- /dev/null +++ b/hooks/antigravity/rules.md @@ -0,0 +1,32 @@ +# RTK - Rust Token Killer (Google Antigravity) + +**Usage**: Token-optimized CLI proxy for shell commands. + +## Rule + +Always prefix shell commands with `rtk` to minimize token consumption. + +Examples: + +```bash +rtk git status +rtk cargo test +rtk ls src/ +rtk grep "pattern" src/ +rtk find "*.rs" . +rtk docker ps +rtk gh pr list +``` + +## Meta Commands + +```bash +rtk gain # Show token savings +rtk gain --history # Command history with savings +rtk discover # Find missed RTK opportunities +rtk proxy # Run raw (no filtering, for debugging) +``` + +## Why + +RTK filters and compresses command output before it reaches the LLM context, saving 60-90% tokens on common operations. Always use `rtk ` instead of raw commands. 
diff --git a/hooks/kilocode/README.md b/hooks/kilocode/README.md new file mode 100644 index 000000000..2b38d9427 --- /dev/null +++ b/hooks/kilocode/README.md @@ -0,0 +1,9 @@ +# Kilo Code Hooks + +> Part of [`hooks/`](../README.md) — see also [`src/hooks/`](../../src/hooks/README.md) for installation code + +## Specifics + +- Prompt-level guidance only (no programmatic hook) -- relies on Kilo Code reading custom instructions +- `rules.md` contains the instruction to prefix all shell commands with `rtk`, usage examples, and meta commands +- Installed to `.kilocode/rules/rtk-rules.md` (project-local) by `rtk init --agent kilocode` diff --git a/hooks/kilocode/rules.md b/hooks/kilocode/rules.md new file mode 100644 index 000000000..53d764596 --- /dev/null +++ b/hooks/kilocode/rules.md @@ -0,0 +1,32 @@ +# RTK - Rust Token Killer (Kilo Code) + +**Usage**: Token-optimized CLI proxy for shell commands. + +## Rule + +Always prefix shell commands with `rtk` to minimize token consumption. + +Examples: + +```bash +rtk git status +rtk cargo test +rtk ls src/ +rtk grep "pattern" src/ +rtk find "*.rs" . +rtk docker ps +rtk gh pr list +``` + +## Meta Commands + +```bash +rtk gain # Show token savings +rtk gain --history # Command history with savings +rtk discover # Find missed RTK opportunities +rtk proxy # Run raw (no filtering, for debugging) +``` + +## Why + +RTK filters and compresses command output before it reaches the LLM context, saving 60-90% tokens on common operations. Always use `rtk ` instead of raw commands. 
diff --git a/src/hooks/init.rs b/src/hooks/init.rs index cd2e4e223..c65465962 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -1245,6 +1245,86 @@ fn run_windsurf_mode(verbose: u8) -> Result<()> { Ok(()) } +// ─── Kilo Code support ──────────────────────────────────────── + +const KILOCODE_RULES: &str = include_str!("../../hooks/kilocode/rules.md"); + +pub fn run_kilocode_mode(verbose: u8) -> Result<()> { + run_kilocode_mode_at(&std::env::current_dir()?, verbose) +} + +fn run_kilocode_mode_at(base_dir: &Path, verbose: u8) -> Result<()> { + // Kilo Code reads .kilocode/rules/ from the project root (workspace-scoped) + let target_dir = base_dir.join(".kilocode/rules"); + let rules_path = target_dir.join("rtk-rules.md"); + + let existing = fs::read_to_string(&rules_path).unwrap_or_default(); + if existing.contains("RTK") || existing.contains("rtk") { + println!("\nRTK already configured for Kilo Code in this project.\n"); + println!(" Rules: .kilocode/rules/rtk-rules.md (already present)"); + } else { + fs::create_dir_all(&target_dir).context("Failed to create .kilocode/rules directory")?; + let new_content = if existing.trim().is_empty() { + KILOCODE_RULES.to_string() + } else { + format!("{}\n\n{}", existing.trim(), KILOCODE_RULES) + }; + fs::write(&rules_path, &new_content) + .context("Failed to write .kilocode/rules/rtk-rules.md")?; + + if verbose > 0 { + eprintln!("Wrote .kilocode/rules/rtk-rules.md"); + } + + println!("\nRTK configured for Kilo Code.\n"); + println!(" Rules: .kilocode/rules/rtk-rules.md (installed)"); + } + println!(" Kilo Code will now use rtk commands for token savings."); + println!(" Test with: git status\n"); + + Ok(()) +} + +// ─── Google Antigravity support ─────────────────────────────── + +const ANTIGRAVITY_RULES: &str = include_str!("../../hooks/antigravity/rules.md"); + +pub fn run_antigravity_mode(verbose: u8) -> Result<()> { + run_antigravity_mode_at(&std::env::current_dir()?, verbose) +} + +fn 
run_antigravity_mode_at(base_dir: &Path, verbose: u8) -> Result<()> { + // Antigravity reads .agents/rules/ from the project root (workspace-scoped) + let target_dir = base_dir.join(".agents/rules"); + let rules_path = target_dir.join("antigravity-rtk-rules.md"); + + let existing = fs::read_to_string(&rules_path).unwrap_or_default(); + if existing.contains("RTK") || existing.contains("rtk") { + println!("\nRTK already configured for Antigravity in this project.\n"); + println!(" Rules: .agents/rules/antigravity-rtk-rules.md (already present)"); + } else { + fs::create_dir_all(&target_dir).context("Failed to create .agents/rules directory")?; + let new_content = if existing.trim().is_empty() { + ANTIGRAVITY_RULES.to_string() + } else { + format!("{}\n\n{}", existing.trim(), ANTIGRAVITY_RULES) + }; + fs::write(&rules_path, &new_content) + .context("Failed to write .agents/rules/antigravity-rtk-rules.md")?; + + if verbose > 0 { + eprintln!("Wrote .agents/rules/antigravity-rtk-rules.md"); + } + + println!("\nRTK configured for Google Antigravity.\n"); + println!(" Rules: .agents/rules/antigravity-rtk-rules.md (installed)"); + } + println!(" Antigravity will now use rtk commands for token savings."); + println!(" Test with: git status\n"); + + Ok(()) +} + fn run_codex_mode(global: bool, verbose: u8) -> Result<()> { let (agents_md_path, rtk_md_path) = if global { let codex_dir = resolve_codex_dir()?; @@ -2669,6 +2749,56 @@ More notes ); } + #[test] + fn test_kilocode_mode_creates_rules_file() { + let temp = TempDir::new().unwrap(); + run_kilocode_mode_at(temp.path(), 0).unwrap(); + + let rules_path = temp.path().join(".kilocode/rules/rtk-rules.md"); + assert!(rules_path.exists(), "Rules file should be created"); + let content = fs::read_to_string(&rules_path).unwrap(); + assert!(content.contains("RTK"), "Rules file should contain RTK"); + } + + #[test] + fn test_kilocode_mode_is_idempotent() { + let temp = TempDir::new().unwrap(); + run_kilocode_mode_at(temp.path(), 
0).unwrap(); + + let path = temp.path().join(".kilocode/rules/rtk-rules.md"); + let first = fs::read_to_string(&path).unwrap(); + + // Second run should not overwrite + run_kilocode_mode_at(temp.path(), 0).unwrap(); + let second = fs::read_to_string(&path).unwrap(); + assert_eq!(first, second, "Idempotent: content should not change"); + } + + #[test] + fn test_antigravity_mode_creates_rules_file() { + let temp = TempDir::new().unwrap(); + run_antigravity_mode_at(temp.path(), 0).unwrap(); + + let rules_path = temp.path().join(".agents/rules/antigravity-rtk-rules.md"); + assert!(rules_path.exists(), "Rules file should be created"); + let content = fs::read_to_string(&rules_path).unwrap(); + assert!(content.contains("RTK"), "Rules file should contain RTK"); + } + + #[test] + fn test_antigravity_mode_is_idempotent() { + let temp = TempDir::new().unwrap(); + run_antigravity_mode_at(temp.path(), 0).unwrap(); + + let path = temp.path().join(".agents/rules/antigravity-rtk-rules.md"); + let first = fs::read_to_string(&path).unwrap(); + + // Second run should not overwrite + run_antigravity_mode_at(temp.path(), 0).unwrap(); + let second = fs::read_to_string(&path).unwrap(); + assert_eq!(first, second, "Idempotent: content should not change"); + } + #[test] fn test_patch_agents_md_creates_missing_file() { let temp = TempDir::new().unwrap(); diff --git a/src/main.rs b/src/main.rs index 1954ed8ea..bc9e7dfb9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -40,6 +40,10 @@ pub enum AgentTarget { Windsurf, /// Cline / Roo Code (VS Code) Cline, + /// Kilo Code + Kilocode, + /// Google Antigravity + Antigravity, } #[derive(Parser)] @@ -1624,6 +1628,18 @@ fn run_cli() -> Result { hooks::init::run_gemini(global, hook_only, patch_mode, cli.verbose)?; } else if copilot { hooks::init::run_copilot(cli.verbose)?; + } else if agent == Some(AgentTarget::Kilocode) { + if global { + anyhow::bail!("Kilo Code is project-scoped. 
Use: rtk init --agent kilocode"); + } + hooks::init::run_kilocode_mode(cli.verbose)?; + } else if agent == Some(AgentTarget::Antigravity) { + if global { + anyhow::bail!( + "Antigravity is project-scoped. Use: rtk init --agent antigravity" + ); + } + hooks::init::run_antigravity_mode(cli.verbose)?; } else { let install_opencode = opencode; let install_claude = !opencode; From 4db7e86c2619679301f152e37ce4e9584ce52d0a Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 8 Apr 2026 22:51:55 +0000 Subject: [PATCH 097/204] docs: add --reset/--yes flags to docs/usage/AUDIT_GUIDE.md Adds reset flag documentation to the upstream file path: - Quick Reference: reset usage examples with and without --yes - Command Options: Reset Flag table with --reset and --yes descriptions - Warning note about irreversibility and non-interactive behaviour https://claude.ai/code/session_015WXbDdxEU3pXPBtheDkXcd --- docs/usage/AUDIT_GUIDE.md | 445 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 445 insertions(+) create mode 100644 docs/usage/AUDIT_GUIDE.md diff --git a/docs/usage/AUDIT_GUIDE.md b/docs/usage/AUDIT_GUIDE.md new file mode 100644 index 000000000..27a0a1eda --- /dev/null +++ b/docs/usage/AUDIT_GUIDE.md @@ -0,0 +1,445 @@ +# RTK Token Savings Audit Guide + +Complete guide to analyzing your rtk token savings with temporal breakdowns and data exports. + +## Overview + +The `rtk gain` command provides comprehensive analytics for tracking your token savings across time periods. 
+ +**Database Location**: `~/.local/share/rtk/history.db` +**Retention Policy**: 90 days +**Scope**: Global across all projects, worktrees, and Claude sessions + +## Quick Reference + +```bash +# Default summary view +rtk gain + +# Temporal breakdowns +rtk gain --daily # All days since tracking started +rtk gain --weekly # Aggregated by week +rtk gain --monthly # Aggregated by month +rtk gain --all # Show all breakdowns at once + +# Export formats +rtk gain --all --format json > savings.json +rtk gain --all --format csv > savings.csv + +# Combined flags +rtk gain --graph --history --quota # Classic view with extras +rtk gain --daily --weekly --monthly # Multiple breakdowns + +# Reset all tracking data +rtk gain --reset # prompts [y/N] before deleting +rtk gain --reset --yes # skip prompt (CI/scripts) +``` + +## Command Options + +### Temporal Flags + +| Flag | Description | Output | +|------|-------------|--------| +| `--daily` | Day-by-day breakdown | All days with full metrics | +| `--weekly` | Week-by-week breakdown | Aggregated by Sunday-Saturday weeks | +| `--monthly` | Month-by-month breakdown | Aggregated by calendar month | +| `--all` | All time breakdowns | Daily + Weekly + Monthly combined | + +### Classic Flags (still available) + +| Flag | Description | +|------|-------------| +| `--graph` | ASCII graph of last 30 days | +| `--history` | Recent 10 commands | +| `--quota` | Monthly quota analysis (Pro/5x/20x tiers) | +| `--tier ` | Quota tier: pro, 5x, 20x (default: 20x) | + +### Reset Flag + +| Flag | Description | +|------|-------------| +| `--reset` | Permanently delete all tracking data (commands + parse failures) | +| `--yes` | Skip the confirmation prompt (for CI/scripts) | + +> **Warning**: `--reset` is irreversible. It clears both the `commands` and `parse_failures` tables atomically. A `[y/N]` confirmation prompt is shown by default. In non-interactive environments (piped stdin), it defaults to `N` unless `--yes` is passed. 
+ +### Export Formats + +| Format | Flag | Use Case | +|--------|------|----------| +| `text` | `--format text` (default) | Terminal display | +| `json` | `--format json` | Programmatic analysis, APIs | +| `csv` | `--format csv` | Excel, data analysis, plotting | + +## Output Examples + +### Daily Breakdown + +``` +📅 Daily Breakdown (3 days) +════════════════════════════════════════════════════════════════ +Date Cmds Input Output Saved Save% +──────────────────────────────────────────────────────────────── +2026-01-28 89 380.9K 26.7K 355.8K 93.4% +2026-01-29 102 894.5K 32.4K 863.7K 96.6% +2026-01-30 5 749 55 694 92.7% +──────────────────────────────────────────────────────────────── +TOTAL 196 1.3M 59.2K 1.2M 95.6% +``` + +**Metrics explained:** +- **Cmds**: Number of rtk commands executed +- **Input**: Estimated tokens from raw command output +- **Output**: Actual tokens after rtk filtering +- **Saved**: Input - Output (tokens prevented from reaching LLM) +- **Save%**: Percentage reduction (Saved / Input × 100) + +### Weekly Breakdown + +``` +📊 Weekly Breakdown (1 weeks) +════════════════════════════════════════════════════════════════════════ +Week Cmds Input Output Saved Save% +──────────────────────────────────────────────────────────────────────── +01-26 → 02-01 196 1.3M 59.2K 1.2M 95.6% +──────────────────────────────────────────────────────────────────────── +TOTAL 196 1.3M 59.2K 1.2M 95.6% +``` + +**Week definition**: Sunday to Saturday (ISO week starting Sunday at 00:00) + +### Monthly Breakdown + +``` +📆 Monthly Breakdown (1 months) +════════════════════════════════════════════════════════════════ +Month Cmds Input Output Saved Save% +──────────────────────────────────────────────────────────────── +2026-01 196 1.3M 59.2K 1.2M 95.6% +──────────────────────────────────────────────────────────────── +TOTAL 196 1.3M 59.2K 1.2M 95.6% +``` + +**Month format**: YYYY-MM (calendar month) + +### JSON Export + +```json +{ + "summary": { + "total_commands": 196, + 
"total_input": 1276098, + "total_output": 59244, + "total_saved": 1220217, + "avg_savings_pct": 95.62 + }, + "daily": [ + { + "date": "2026-01-28", + "commands": 89, + "input_tokens": 380894, + "output_tokens": 26744, + "saved_tokens": 355779, + "savings_pct": 93.41 + } + ], + "weekly": [...], + "monthly": [...] +} +``` + +**Use cases:** +- API integration +- Custom dashboards +- Automated reporting +- Data pipeline ingestion + +### CSV Export + +```csv +# Daily Data +date,commands,input_tokens,output_tokens,saved_tokens,savings_pct +2026-01-28,89,380894,26744,355779,93.41 +2026-01-29,102,894455,32445,863744,96.57 + +# Weekly Data +week_start,week_end,commands,input_tokens,output_tokens,saved_tokens,savings_pct +2026-01-26,2026-02-01,196,1276098,59244,1220217,95.62 + +# Monthly Data +month,commands,input_tokens,output_tokens,saved_tokens,savings_pct +2026-01,196,1276098,59244,1220217,95.62 +``` + +**Use cases:** +- Excel analysis +- Python/R data science +- Google Sheets dashboards +- Matplotlib/seaborn plotting + +## Analysis Workflows + +### Weekly Progress Tracking + +```bash +# Generate weekly report every Monday +rtk gain --weekly --format csv > reports/week-$(date +%Y-%W).csv + +# Compare this week vs last week +rtk gain --weekly | tail -3 +``` + +### Monthly Cost Analysis + +```bash +# Export monthly data for budget review +rtk gain --monthly --format json | jq '.monthly[] | + {month, saved_tokens, quota_pct: (.saved_tokens / 6000000 * 100)}' +``` + +### Data Science Analysis + +```python +import pandas as pd +import subprocess + +# Get CSV data +result = subprocess.run(['rtk', 'gain', '--all', '--format', 'csv'], + capture_output=True, text=True) + +# Parse daily data +lines = result.stdout.split('\n') +daily_start = lines.index('# Daily Data') + 2 +daily_end = lines.index('', daily_start) +daily_df = pd.read_csv(pd.StringIO('\n'.join(lines[daily_start:daily_end]))) + +# Plot savings trend +daily_df['date'] = pd.to_datetime(daily_df['date']) 
+daily_df.plot(x='date', y='savings_pct', kind='line') +``` + +### Excel Analysis + +1. Export CSV: `rtk gain --all --format csv > rtk-data.csv` +2. Open in Excel +3. Create pivot tables: + - Daily trends (line chart) + - Weekly totals (bar chart) + - Savings % distribution (histogram) + +### Dashboard Creation + +```bash +# Generate dashboard data daily via cron +0 0 * * * rtk gain --all --format json > /var/www/dashboard/rtk-stats.json + +# Serve with static site +cat > index.html <<'EOF' + + + +EOF +``` + +## Understanding Token Savings + +### Token Estimation + +rtk estimates tokens using `text.len() / 4` (4 characters per token average). + +**Accuracy**: ±10% compared to actual LLM tokenization (sufficient for trends). + +### Savings Calculation + +``` +Input Tokens = estimate_tokens(raw_command_output) +Output Tokens = estimate_tokens(rtk_filtered_output) +Saved Tokens = Input - Output +Savings % = (Saved / Input) × 100 +``` + +### Typical Savings by Command + +| Command | Typical Savings | Mechanism | +|---------|----------------|-----------| +| `rtk git status` | 77-93% | Compact stat format | +| `rtk eslint` | 84% | Group by rule | +| `rtk vitest run` | 94-99% | Show failures only | +| `rtk find` | 75% | Tree format | +| `rtk pnpm list` | 70-90% | Compact dependencies | +| `rtk grep` | 70% | Truncate + group | + +## Database Management + +### Inspect Raw Data + +```bash +# Location +ls -lh ~/.local/share/rtk/history.db + +# Schema +sqlite3 ~/.local/share/rtk/history.db ".schema" + +# Recent records +sqlite3 ~/.local/share/rtk/history.db \ + "SELECT timestamp, rtk_cmd, saved_tokens FROM commands + ORDER BY timestamp DESC LIMIT 10" + +# Total database size +sqlite3 ~/.local/share/rtk/history.db \ + "SELECT COUNT(*), + SUM(saved_tokens) as total_saved, + MIN(DATE(timestamp)) as first_record, + MAX(DATE(timestamp)) as last_record + FROM commands" +``` + +### Backup & Restore + +```bash +# Backup +cp ~/.local/share/rtk/history.db ~/backups/rtk-history-$(date 
+%Y%m%d).db + +# Restore +cp ~/backups/rtk-history-20260128.db ~/.local/share/rtk/history.db + +# Export for migration +sqlite3 ~/.local/share/rtk/history.db .dump > rtk-backup.sql +``` + +### Cleanup + +```bash +# Manual cleanup (older than 90 days) +sqlite3 ~/.local/share/rtk/history.db \ + "DELETE FROM commands WHERE timestamp < datetime('now', '-90 days')" + +# Reset all data +rm ~/.local/share/rtk/history.db +# Next rtk command will recreate database +``` + +## Integration Examples + +### GitHub Actions CI/CD + +```yaml +# .github/workflows/rtk-stats.yml +name: RTK Stats Report +on: + schedule: + - cron: '0 0 * * 1' # Weekly on Monday +jobs: + stats: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install rtk + run: cargo install --path . + - name: Generate report + run: | + rtk gain --weekly --format json > stats/week-$(date +%Y-%W).json + - name: Commit stats + run: | + git add stats/ + git commit -m "Weekly rtk stats" + git push +``` + +### Slack Bot + +```python +import subprocess +import json +import requests + +def send_rtk_stats(): + result = subprocess.run(['rtk', 'gain', '--format', 'json'], + capture_output=True, text=True) + data = json.loads(result.stdout) + + message = f""" + 📊 *RTK Token Savings Report* + + Total Saved: {data['summary']['total_saved']:,} tokens + Savings Rate: {data['summary']['avg_savings_pct']:.1f}% + Commands: {data['summary']['total_commands']} + """ + + requests.post(SLACK_WEBHOOK_URL, json={'text': message}) +``` + +## Troubleshooting + +### No data showing + +```bash +# Check if database exists +ls -lh ~/.local/share/rtk/history.db + +# Check record count +sqlite3 ~/.local/share/rtk/history.db "SELECT COUNT(*) FROM commands" + +# Run a tracked command to generate data +rtk git status +``` + +### Export fails + +```bash +# Check for pipe errors +rtk gain --format json 2>&1 | tee /tmp/rtk-debug.log | jq . 
+ +# Use release build to avoid warnings +cargo build --release +./target/release/rtk gain --format json +``` + +### Incorrect statistics + +Token estimation is a heuristic. For precise measurements: + +```bash +# Install tiktoken +pip install tiktoken + +# Validate estimation +rtk git status > output.txt +python -c " +import tiktoken +enc = tiktoken.get_encoding('cl100k_base') +text = open('output.txt').read() +print(f'Actual tokens: {len(enc.encode(text))}') +print(f'rtk estimate: {len(text) // 4}') +" +``` + +## Best Practices + +1. **Regular Exports**: `rtk gain --all --format json > monthly-$(date +%Y%m).json` +2. **Trend Analysis**: Compare week-over-week savings to identify optimization opportunities +3. **Command Profiling**: Use `--history` to see which commands save the most +4. **Backup Before Cleanup**: Always backup before manual database operations +5. **CI Integration**: Track savings across team in shared dashboards + +## See Also + +- [README.md](../README.md) - Full rtk documentation +- [CLAUDE.md](../CLAUDE.md) - Claude Code integration guide +- [ARCHITECTURE.md](../contributing/ARCHITECTURE.md) - Technical architecture From 514744e27b0e731f1336d34367d6fa117b57b0b2 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 8 Apr 2026 22:53:27 +0000 Subject: [PATCH 098/204] docs: remove old docs/AUDIT_GUIDE.md path Upstream moved this to docs/usage/AUDIT_GUIDE.md in deda44f. Removing the old path so git resolves the rename correctly on merge. https://claude.ai/code/session_015WXbDdxEU3pXPBtheDkXcd --- docs/AUDIT_GUIDE.md | 432 -------------------------------------------- 1 file changed, 432 deletions(-) delete mode 100644 docs/AUDIT_GUIDE.md diff --git a/docs/AUDIT_GUIDE.md b/docs/AUDIT_GUIDE.md deleted file mode 100644 index 8bcebdffe..000000000 --- a/docs/AUDIT_GUIDE.md +++ /dev/null @@ -1,432 +0,0 @@ -# RTK Token Savings Audit Guide - -Complete guide to analyzing your rtk token savings with temporal breakdowns and data exports. 
- -## Overview - -The `rtk gain` command provides comprehensive analytics for tracking your token savings across time periods. - -**Database Location**: `~/.local/share/rtk/history.db` -**Retention Policy**: 90 days -**Scope**: Global across all projects, worktrees, and Claude sessions - -## Quick Reference - -```bash -# Default summary view -rtk gain - -# Temporal breakdowns -rtk gain --daily # All days since tracking started -rtk gain --weekly # Aggregated by week -rtk gain --monthly # Aggregated by month -rtk gain --all # Show all breakdowns at once - -# Export formats -rtk gain --all --format json > savings.json -rtk gain --all --format csv > savings.csv - -# Combined flags -rtk gain --graph --history --quota # Classic view with extras -rtk gain --daily --weekly --monthly # Multiple breakdowns -``` - -## Command Options - -### Temporal Flags - -| Flag | Description | Output | -|------|-------------|--------| -| `--daily` | Day-by-day breakdown | All days with full metrics | -| `--weekly` | Week-by-week breakdown | Aggregated by Sunday-Saturday weeks | -| `--monthly` | Month-by-month breakdown | Aggregated by calendar month | -| `--all` | All time breakdowns | Daily + Weekly + Monthly combined | - -### Classic Flags (still available) - -| Flag | Description | -|------|-------------| -| `--graph` | ASCII graph of last 30 days | -| `--history` | Recent 10 commands | -| `--quota` | Monthly quota analysis (Pro/5x/20x tiers) | -| `--tier ` | Quota tier: pro, 5x, 20x (default: 20x) | - -### Export Formats - -| Format | Flag | Use Case | -|--------|------|----------| -| `text` | `--format text` (default) | Terminal display | -| `json` | `--format json` | Programmatic analysis, APIs | -| `csv` | `--format csv` | Excel, data analysis, plotting | - -## Output Examples - -### Daily Breakdown - -``` -📅 Daily Breakdown (3 days) -════════════════════════════════════════════════════════════════ -Date Cmds Input Output Saved Save% 
-──────────────────────────────────────────────────────────────── -2026-01-28 89 380.9K 26.7K 355.8K 93.4% -2026-01-29 102 894.5K 32.4K 863.7K 96.6% -2026-01-30 5 749 55 694 92.7% -──────────────────────────────────────────────────────────────── -TOTAL 196 1.3M 59.2K 1.2M 95.6% -``` - -**Metrics explained:** -- **Cmds**: Number of rtk commands executed -- **Input**: Estimated tokens from raw command output -- **Output**: Actual tokens after rtk filtering -- **Saved**: Input - Output (tokens prevented from reaching LLM) -- **Save%**: Percentage reduction (Saved / Input × 100) - -### Weekly Breakdown - -``` -📊 Weekly Breakdown (1 weeks) -════════════════════════════════════════════════════════════════════════ -Week Cmds Input Output Saved Save% -──────────────────────────────────────────────────────────────────────── -01-26 → 02-01 196 1.3M 59.2K 1.2M 95.6% -──────────────────────────────────────────────────────────────────────── -TOTAL 196 1.3M 59.2K 1.2M 95.6% -``` - -**Week definition**: Sunday to Saturday (ISO week starting Sunday at 00:00) - -### Monthly Breakdown - -``` -📆 Monthly Breakdown (1 months) -════════════════════════════════════════════════════════════════ -Month Cmds Input Output Saved Save% -──────────────────────────────────────────────────────────────── -2026-01 196 1.3M 59.2K 1.2M 95.6% -──────────────────────────────────────────────────────────────── -TOTAL 196 1.3M 59.2K 1.2M 95.6% -``` - -**Month format**: YYYY-MM (calendar month) - -### JSON Export - -```json -{ - "summary": { - "total_commands": 196, - "total_input": 1276098, - "total_output": 59244, - "total_saved": 1220217, - "avg_savings_pct": 95.62 - }, - "daily": [ - { - "date": "2026-01-28", - "commands": 89, - "input_tokens": 380894, - "output_tokens": 26744, - "saved_tokens": 355779, - "savings_pct": 93.41 - } - ], - "weekly": [...], - "monthly": [...] 
-} -``` - -**Use cases:** -- API integration -- Custom dashboards -- Automated reporting -- Data pipeline ingestion - -### CSV Export - -```csv -# Daily Data -date,commands,input_tokens,output_tokens,saved_tokens,savings_pct -2026-01-28,89,380894,26744,355779,93.41 -2026-01-29,102,894455,32445,863744,96.57 - -# Weekly Data -week_start,week_end,commands,input_tokens,output_tokens,saved_tokens,savings_pct -2026-01-26,2026-02-01,196,1276098,59244,1220217,95.62 - -# Monthly Data -month,commands,input_tokens,output_tokens,saved_tokens,savings_pct -2026-01,196,1276098,59244,1220217,95.62 -``` - -**Use cases:** -- Excel analysis -- Python/R data science -- Google Sheets dashboards -- Matplotlib/seaborn plotting - -## Analysis Workflows - -### Weekly Progress Tracking - -```bash -# Generate weekly report every Monday -rtk gain --weekly --format csv > reports/week-$(date +%Y-%W).csv - -# Compare this week vs last week -rtk gain --weekly | tail -3 -``` - -### Monthly Cost Analysis - -```bash -# Export monthly data for budget review -rtk gain --monthly --format json | jq '.monthly[] | - {month, saved_tokens, quota_pct: (.saved_tokens / 6000000 * 100)}' -``` - -### Data Science Analysis - -```python -import pandas as pd -import subprocess - -# Get CSV data -result = subprocess.run(['rtk', 'gain', '--all', '--format', 'csv'], - capture_output=True, text=True) - -# Parse daily data -lines = result.stdout.split('\n') -daily_start = lines.index('# Daily Data') + 2 -daily_end = lines.index('', daily_start) -daily_df = pd.read_csv(pd.StringIO('\n'.join(lines[daily_start:daily_end]))) - -# Plot savings trend -daily_df['date'] = pd.to_datetime(daily_df['date']) -daily_df.plot(x='date', y='savings_pct', kind='line') -``` - -### Excel Analysis - -1. Export CSV: `rtk gain --all --format csv > rtk-data.csv` -2. Open in Excel -3. 
Create pivot tables: - - Daily trends (line chart) - - Weekly totals (bar chart) - - Savings % distribution (histogram) - -### Dashboard Creation - -```bash -# Generate dashboard data daily via cron -0 0 * * * rtk gain --all --format json > /var/www/dashboard/rtk-stats.json - -# Serve with static site -cat > index.html <<'EOF' - - - -EOF -``` - -## Understanding Token Savings - -### Token Estimation - -rtk estimates tokens using `text.len() / 4` (4 characters per token average). - -**Accuracy**: ±10% compared to actual LLM tokenization (sufficient for trends). - -### Savings Calculation - -``` -Input Tokens = estimate_tokens(raw_command_output) -Output Tokens = estimate_tokens(rtk_filtered_output) -Saved Tokens = Input - Output -Savings % = (Saved / Input) × 100 -``` - -### Typical Savings by Command - -| Command | Typical Savings | Mechanism | -|---------|----------------|-----------| -| `rtk git status` | 77-93% | Compact stat format | -| `rtk eslint` | 84% | Group by rule | -| `rtk vitest run` | 94-99% | Show failures only | -| `rtk find` | 75% | Tree format | -| `rtk pnpm list` | 70-90% | Compact dependencies | -| `rtk grep` | 70% | Truncate + group | - -## Database Management - -### Inspect Raw Data - -```bash -# Location -ls -lh ~/.local/share/rtk/history.db - -# Schema -sqlite3 ~/.local/share/rtk/history.db ".schema" - -# Recent records -sqlite3 ~/.local/share/rtk/history.db \ - "SELECT timestamp, rtk_cmd, saved_tokens FROM commands - ORDER BY timestamp DESC LIMIT 10" - -# Total database size -sqlite3 ~/.local/share/rtk/history.db \ - "SELECT COUNT(*), - SUM(saved_tokens) as total_saved, - MIN(DATE(timestamp)) as first_record, - MAX(DATE(timestamp)) as last_record - FROM commands" -``` - -### Backup & Restore - -```bash -# Backup -cp ~/.local/share/rtk/history.db ~/backups/rtk-history-$(date +%Y%m%d).db - -# Restore -cp ~/backups/rtk-history-20260128.db ~/.local/share/rtk/history.db - -# Export for migration -sqlite3 ~/.local/share/rtk/history.db .dump > 
rtk-backup.sql -``` - -### Cleanup - -```bash -# Manual cleanup (older than 90 days) -sqlite3 ~/.local/share/rtk/history.db \ - "DELETE FROM commands WHERE timestamp < datetime('now', '-90 days')" - -# Reset all data -rm ~/.local/share/rtk/history.db -# Next rtk command will recreate database -``` - -## Integration Examples - -### GitHub Actions CI/CD - -```yaml -# .github/workflows/rtk-stats.yml -name: RTK Stats Report -on: - schedule: - - cron: '0 0 * * 1' # Weekly on Monday -jobs: - stats: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Install rtk - run: cargo install --path . - - name: Generate report - run: | - rtk gain --weekly --format json > stats/week-$(date +%Y-%W).json - - name: Commit stats - run: | - git add stats/ - git commit -m "Weekly rtk stats" - git push -``` - -### Slack Bot - -```python -import subprocess -import json -import requests - -def send_rtk_stats(): - result = subprocess.run(['rtk', 'gain', '--format', 'json'], - capture_output=True, text=True) - data = json.loads(result.stdout) - - message = f""" - 📊 *RTK Token Savings Report* - - Total Saved: {data['summary']['total_saved']:,} tokens - Savings Rate: {data['summary']['avg_savings_pct']:.1f}% - Commands: {data['summary']['total_commands']} - """ - - requests.post(SLACK_WEBHOOK_URL, json={'text': message}) -``` - -## Troubleshooting - -### No data showing - -```bash -# Check if database exists -ls -lh ~/.local/share/rtk/history.db - -# Check record count -sqlite3 ~/.local/share/rtk/history.db "SELECT COUNT(*) FROM commands" - -# Run a tracked command to generate data -rtk git status -``` - -### Export fails - -```bash -# Check for pipe errors -rtk gain --format json 2>&1 | tee /tmp/rtk-debug.log | jq . - -# Use release build to avoid warnings -cargo build --release -./target/release/rtk gain --format json -``` - -### Incorrect statistics - -Token estimation is a heuristic. 
For precise measurements: - -```bash -# Install tiktoken -pip install tiktoken - -# Validate estimation -rtk git status > output.txt -python -c " -import tiktoken -enc = tiktoken.get_encoding('cl100k_base') -text = open('output.txt').read() -print(f'Actual tokens: {len(enc.encode(text))}') -print(f'rtk estimate: {len(text) // 4}') -" -``` - -## Best Practices - -1. **Regular Exports**: `rtk gain --all --format json > monthly-$(date +%Y%m).json` -2. **Trend Analysis**: Compare week-over-week savings to identify optimization opportunities -3. **Command Profiling**: Use `--history` to see which commands save the most -4. **Backup Before Cleanup**: Always backup before manual database operations -5. **CI Integration**: Track savings across team in shared dashboards - -## See Also - -- [README.md](../README.md) - Full rtk documentation -- [CLAUDE.md](../CLAUDE.md) - Claude Code integration guide -- [ARCHITECTURE.md](../ARCHITECTURE.md) - Technical architecture From b73506867e02d5bfabf586ecc799a38c86340e1b Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Thu, 9 Apr 2026 09:05:16 +0200 Subject: [PATCH 099/204] docs: add Windows setup instructions and troubleshooting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Windows users downloading the pre-built binary had no guidance — double-clicking rtk.exe flashes a terminal and exits immediately. - README: add warning under Windows binary link, add full Windows section with WSL vs native comparison table - installation.md: add pre-built binaries section with Windows note - supported-agents.md: add Windows support section (CLAUDE.md fallback) - troubleshooting.md: add "double-click does nothing" and "hook not working" entries for Windows users Prompted by Discord user report (PedroLyra). 
Signed-off-by: Patrick szymkowiak --- README.md | 39 +++++++++++++++++++ docs/guide/getting-started/installation.md | 10 +++++ .../guide/getting-started/supported-agents.md | 10 +++++ docs/guide/troubleshooting.md | 30 +++++++++++++- 4 files changed, 88 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 78116b252..221086f1e 100644 --- a/README.md +++ b/README.md @@ -87,6 +87,8 @@ Download from [releases](https://github.com/rtk-ai/rtk/releases): - Linux: `rtk-x86_64-unknown-linux-musl.tar.gz` / `rtk-aarch64-unknown-linux-gnu.tar.gz` - Windows: `rtk-x86_64-pc-windows-msvc.zip` +> **Windows users**: Extract the zip and place `rtk.exe` somewhere in your PATH (e.g. `C:\Users\\.local\bin`). Run RTK from **Command Prompt**, **PowerShell**, or **Windows Terminal** — do not double-click the `.exe` (it will flash and close). For the best experience, use [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) where the full hook system works natively. See [Windows setup](#windows) below for details. + ### Verify Installation ```bash @@ -306,6 +308,43 @@ rtk init --show # Verify installation After install, **restart Claude Code**. +## Windows + +RTK works on Windows with some limitations. The auto-rewrite hook (`rtk-rewrite.sh`) requires a Unix shell, so on native Windows RTK falls back to **CLAUDE.md injection mode** — your AI assistant receives RTK instructions but commands are not rewritten automatically. + +### Recommended: WSL (full support) + +For the best experience, use [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) (Windows Subsystem for Linux). Inside WSL, RTK works exactly like Linux — full hook support, auto-rewrite, everything: + +```bash +# Inside WSL +curl -fsSL https://raw.githubusercontent.com/rtk-ai/rtk/refs/heads/master/install.sh | sh +rtk init -g +``` + +### Native Windows (limited support) + +On native Windows (cmd.exe / PowerShell), RTK filters work but the hook does not auto-rewrite commands: + +```powershell +# 1. 
Download and extract rtk-x86_64-pc-windows-msvc.zip from releases +# 2. Add rtk.exe to your PATH +# 3. Initialize (falls back to CLAUDE.md injection) +rtk init -g +# 4. Use rtk explicitly +rtk cargo test +rtk git status +``` + +**Important**: Do not double-click `rtk.exe` — it is a CLI tool that prints usage and exits immediately. Always run it from a terminal (Command Prompt, PowerShell, or Windows Terminal). + +| Feature | WSL | Native Windows | +|---------|-----|----------------| +| Filters (cargo, git, etc.) | Full | Full | +| Auto-rewrite hook | Yes | No (CLAUDE.md fallback) | +| `rtk init -g` | Hook mode | CLAUDE.md mode | +| `rtk gain` / analytics | Full | Full | + ## Supported AI Tools RTK supports 10 AI coding tools. Each integration transparently rewrites shell commands to `rtk` equivalents for 60-90% token savings. diff --git a/docs/guide/getting-started/installation.md b/docs/guide/getting-started/installation.md index 3f3eac286..3f9ee991d 100644 --- a/docs/guide/getting-started/installation.md +++ b/docs/guide/getting-started/installation.md @@ -43,6 +43,16 @@ brew install rtk-ai/tap/rtk cargo install rtk ``` +## Pre-built binaries (Windows, Linux, macOS) + +Download from [GitHub releases](https://github.com/rtk-ai/rtk/releases): + +- macOS: `rtk-x86_64-apple-darwin.tar.gz` / `rtk-aarch64-apple-darwin.tar.gz` +- Linux: `rtk-x86_64-unknown-linux-musl.tar.gz` / `rtk-aarch64-unknown-linux-gnu.tar.gz` +- Windows: `rtk-x86_64-pc-windows-msvc.zip` + +**Windows users**: Extract the zip and place `rtk.exe` in a directory on your PATH. Run RTK from Command Prompt, PowerShell, or Windows Terminal — do not double-click the `.exe` (it prints usage and exits immediately). For full hook support, use [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) instead. 
+ ## Verify installation ```bash diff --git a/docs/guide/getting-started/supported-agents.md b/docs/guide/getting-started/supported-agents.md index dc45bf970..a007c0dd0 100644 --- a/docs/guide/getting-started/supported-agents.md +++ b/docs/guide/getting-started/supported-agents.md @@ -124,6 +124,16 @@ Support is blocked on upstream `BeforeToolCallback` ([mistral-vibe#531](https:// Rules file integrations (Cline, Windsurf, Codex) rely on the model following instructions. Full hook integrations (Claude Code, Cursor, Gemini) are guaranteed — the command is rewritten before the agent sees it. +## Windows support + +The shell hook (`rtk-rewrite.sh`) requires a Unix shell. On native Windows: + +- `rtk init -g` automatically falls back to **CLAUDE.md injection mode** (prompt-level instructions) +- Filters work normally (`rtk cargo test`, `rtk git status`) +- Auto-rewrite does not work — the AI assistant is instructed to use RTK but commands are not intercepted + +For full hook support on Windows, use [WSL](https://learn.microsoft.com/en-us/windows/wsl/install). Inside WSL, all agents with shell hook integration (Claude Code, Cursor, Gemini) work identically to Linux. + ## Graceful degradation Hooks never block command execution. If RTK is missing, the hook exits cleanly and the raw command runs unchanged: diff --git a/docs/guide/troubleshooting.md b/docs/guide/troubleshooting.md index 5efcabd9b..8f9a0db35 100644 --- a/docs/guide/troubleshooting.md +++ b/docs/guide/troubleshooting.md @@ -90,7 +90,35 @@ source ~/.zshrc # or ~/.bashrc rtk --version ``` -## RTK not working on Windows +## RTK on Windows + +### Double-clicking rtk.exe does nothing + +**Symptom:** You double-click `rtk.exe`, a terminal flashes and closes instantly. + +**Cause:** RTK is a command-line tool. With no arguments, it prints usage and exits. The console window opens and closes before you can read anything. 
+ +**Fix:** Open a terminal first, then run RTK from there: +- Press `Win+R`, type `cmd`, press Enter +- Or open PowerShell or Windows Terminal +- Then run: `rtk --version` + +### Hook not working (no auto-rewrite) + +**Symptom:** `rtk init -g` shows "Falling back to --claude-md mode" on Windows. + +**Cause:** The auto-rewrite hook (`rtk-rewrite.sh`) requires a Unix shell. Native Windows doesn't have one. + +**Fix:** Use [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) for full hook support: +```bash +# Inside WSL +curl -fsSL https://raw.githubusercontent.com/rtk-ai/rtk/refs/heads/master/install.sh | sh +rtk init -g # full hook mode works in WSL +``` + +On native Windows, RTK falls back to CLAUDE.md injection. Your AI assistant gets RTK instructions but won't auto-rewrite commands. You can still use RTK manually: `rtk cargo test`, `rtk git status`, etc. + +### Node.js tools not found **Symptom:** ``` From d442799e34d522c87a6eb60c2ff373385d201315 Mon Sep 17 00:00:00 2001 From: michaelschleiss Date: Thu, 9 Apr 2026 09:38:22 +0200 Subject: [PATCH 100/204] fix(init): honor CODEX_HOME for Codex global paths --- CHANGELOG.md | 4 ++ hooks/README.md | 2 +- hooks/codex/README.md | 2 +- src/hooks/README.md | 2 +- src/hooks/init.rs | 128 +++++++++++++++++++++++++++++++++++++----- 5 files changed, 121 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16f997663..1c1067489 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * **aws:** add shared runner `run_aws_filtered()` eliminating per-handler boilerplate * **tee:** add `force_tee_hint()` — truncated output saves full data to file with recovery hint +### Bug Fixes + +* **init:** honor `CODEX_HOME` for Codex global install paths and cleanup + ## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) diff --git a/hooks/README.md b/hooks/README.md index 9d5e63809..62875c028 
100644 --- a/hooks/README.md +++ b/hooks/README.md @@ -38,7 +38,7 @@ Each agent subdirectory has its own README with hook-specific details: - **[`cursor/`](cursor/README.md)** — Shell hook, Cursor JSON format, empty `{}` response requirement - **[`cline/`](cline/README.md)** — Rules file (prompt-level), `.clinerules` project-local installation - **[`windsurf/`](windsurf/README.md)** — Rules file (prompt-level), `.windsurfrules` workspace-scoped -- **[`codex/`](codex/README.md)** — Awareness document, `AGENTS.md` integration, `~/.codex/` location +- **[`codex/`](codex/README.md)** — Awareness document, `AGENTS.md` integration, `$CODEX_HOME` or `~/.codex/` location - **[`opencode/`](opencode/README.md)** — TypeScript plugin, `zx` library, `tool.execute.before` event, in-place mutation ## Supported Agents diff --git a/hooks/codex/README.md b/hooks/codex/README.md index e922e6365..50030e958 100644 --- a/hooks/codex/README.md +++ b/hooks/codex/README.md @@ -6,4 +6,4 @@ - Prompt-level guidance via awareness document -- no programmatic hook - `rtk-awareness.md` is injected into `AGENTS.md` with an `@RTK.md` reference -- Installed to `~/.codex/` by `rtk init --codex` +- Installed to `$CODEX_HOME` when set, otherwise `~/.codex/`, by `rtk init --codex` diff --git a/src/hooks/README.md b/src/hooks/README.md index 3853844e2..bf947a0f1 100644 --- a/src/hooks/README.md +++ b/src/hooks/README.md @@ -28,7 +28,7 @@ LLM agent integration layer that installs, validates, and executes command-rewri | Claude-MD (legacy) | `rtk init --claude-md` | 134-line RTK block | CLAUDE.md | | Windsurf | `rtk init -g --agent windsurf` | `.windsurfrules` | -- | | Cline | `rtk init --agent cline` | `.clinerules` | -- | -| Codex | `rtk init --codex` | RTK.md | AGENTS.md | +| Codex | `rtk init --codex` | RTK.md in `$CODEX_HOME` or `~/.codex` | AGENTS.md | | Cursor | `rtk init -g --agent cursor` | Cursor hook | hooks.json | diff --git a/src/hooks/init.rs b/src/hooks/init.rs index c65465962..42a3db7ca 
100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -7,8 +7,8 @@ use std::path::{Path, PathBuf}; use tempfile::NamedTempFile; use super::constants::{ - BEFORE_TOOL_KEY, CLAUDE_DIR, GEMINI_HOOK_FILE, HOOKS_JSON, HOOKS_SUBDIR, PRE_TOOL_USE_KEY, - REWRITE_HOOK_FILE, SETTINGS_JSON, + BEFORE_TOOL_KEY, CLAUDE_DIR, CODEX_DIR, GEMINI_HOOK_FILE, HOOKS_JSON, HOOKS_SUBDIR, + PRE_TOOL_USE_KEY, REWRITE_HOOK_FILE, SETTINGS_JSON, }; use super::integrity; @@ -673,6 +673,7 @@ fn uninstall_codex(global: bool, verbose: u8) -> Result<()> { fn uninstall_codex_at(codex_dir: &Path, verbose: u8) -> Result> { let mut removed = Vec::new(); + let absolute_rtk_md_ref = codex_rtk_md_ref(codex_dir); let rtk_md_path = codex_dir.join(RTK_MD); if rtk_md_path.exists() { @@ -685,7 +686,11 @@ fn uninstall_codex_at(codex_dir: &Path, verbose: u8) -> Result> { } let agents_md_path = codex_dir.join(AGENTS_MD); - if remove_rtk_reference_from_agents(&agents_md_path, verbose)? { + if remove_rtk_reference_from_agents( + &agents_md_path, + &[RTK_MD_REF, absolute_rtk_md_ref.as_str()], + verbose, + )? { removed.push("AGENTS.md: removed @RTK.md reference".to_string()); } @@ -1333,6 +1338,15 @@ fn run_codex_mode(global: bool, verbose: u8) -> Result<()> { (PathBuf::from(AGENTS_MD), PathBuf::from(RTK_MD)) }; + run_codex_mode_with_paths(agents_md_path, rtk_md_path, global, verbose) +} + +fn run_codex_mode_with_paths( + agents_md_path: PathBuf, + rtk_md_path: PathBuf, + global: bool, + verbose: u8, +) -> Result<()> { if global { if let Some(parent) = agents_md_path.parent() { fs::create_dir_all(parent).with_context(|| { @@ -1348,7 +1362,11 @@ fn run_codex_mode(global: bool, verbose: u8) -> Result<()> { // from any CWD (worktrees, nested projects). Codex resolves @ references // relative to CWD, not the AGENTS.md file location. 
let rtk_md_ref = if global { - format!("@{}", rtk_md_path.display()) + codex_rtk_md_ref( + rtk_md_path + .parent() + .context("RTK.md path missing parent directory")?, + ) } else { RTK_MD_REF.to_string() }; @@ -1549,20 +1567,30 @@ fn patch_agents_md(path: &Path, rtk_md_ref: &str, verbose: u8) -> Result { Ok(true) } -fn remove_rtk_reference_from_agents(path: &Path, verbose: u8) -> Result { +fn has_rtk_reference(content: &str, refs: &[&str]) -> bool { + content + .lines() + .map(str::trim) + .any(|line| refs.contains(&line)) +} + +fn remove_rtk_reference_from_agents(path: &Path, refs: &[&str], verbose: u8) -> Result { if !path.exists() { return Ok(false); } let content = fs::read_to_string(path) .with_context(|| format!("Failed to read AGENTS.md: {}", path.display()))?; - if !content.contains(RTK_MD_REF) { + if !has_rtk_reference(&content, refs) { return Ok(false); } let new_content = content .lines() - .filter(|line| !line.trim().starts_with(RTK_MD_REF)) + .filter(|line| { + let trimmed = line.trim(); + !refs.contains(&trimmed) + }) .collect::>() .join("\n"); let cleaned = clean_double_blanks(&new_content); @@ -1571,7 +1599,7 @@ fn remove_rtk_reference_from_agents(path: &Path, verbose: u8) -> Result { if verbose > 0 { eprintln!( - "Removed @RTK.md reference from AGENTS.md: {}", + "Removed RTK.md reference from AGENTS.md: {}", path.display() ); } @@ -1628,7 +1656,27 @@ fn resolve_claude_dir() -> Result { } fn resolve_codex_dir() -> Result { - resolve_home_subdir(".codex") + resolve_codex_dir_from( + std::env::var_os("CODEX_HOME").map(PathBuf::from), + dirs::home_dir(), + ) +} + +fn resolve_codex_dir_from( + codex_home: Option, + home_dir: Option, +) -> Result { + if let Some(path) = codex_home.filter(|path| !path.as_os_str().is_empty()) { + return Ok(path); + } + + home_dir + .map(|home| home.join(CODEX_DIR)) + .context("Cannot determine Codex config directory. 
Set $CODEX_HOME or $HOME.") +} + +fn codex_rtk_md_ref(codex_dir: &Path) -> String { + format!("@{}", codex_dir.join(RTK_MD).display()) } fn resolve_opencode_dir() -> Result { @@ -2126,7 +2174,7 @@ fn show_claude_config() -> Result<()> { println!(" rtk init -g --claude-md # Legacy: full injection into ~/.claude/CLAUDE.md"); println!(" rtk init -g --hook-only # Hook only, no RTK.md"); println!(" rtk init --codex # Configure local AGENTS.md + RTK.md"); - println!(" rtk init -g --codex # Configure ~/.codex/AGENTS.md + ~/.codex/RTK.md"); + println!(" rtk init -g --codex # Configure $CODEX_HOME/AGENTS.md + $CODEX_HOME/RTK.md (or ~/.codex/)"); println!(" rtk init -g --opencode # OpenCode plugin only"); println!(" rtk init -g --agent cursor # Install Cursor Agent hooks"); @@ -2137,6 +2185,7 @@ fn show_codex_config() -> Result<()> { let codex_dir = resolve_codex_dir()?; let global_agents_md = codex_dir.join(AGENTS_MD); let global_rtk_md = codex_dir.join(RTK_MD); + let global_rtk_md_ref = codex_rtk_md_ref(&codex_dir); let local_agents_md = PathBuf::from(AGENTS_MD); let local_rtk_md = PathBuf::from(RTK_MD); @@ -2150,8 +2199,8 @@ fn show_codex_config() -> Result<()> { if global_agents_md.exists() { let content = fs::read_to_string(&global_agents_md)?; - if content.contains(RTK_MD_REF) { - println!("[ok] Global AGENTS.md: @RTK.md reference"); + if has_rtk_reference(&content, &[RTK_MD_REF, global_rtk_md_ref.as_str()]) { + println!("[ok] Global AGENTS.md: RTK.md reference"); } else if content.contains("| filter_fn() |----------->| Print | + | Spawn |--------->| filter |----------->| Print | +---------+ stderr +---------------+ +-------+ - | | - v v + | (live) | + v v +----------+ +---------+ | raw = | | Track | | stdout + | | savings | @@ -60,14 +60,33 @@ The shared wrapper in [`core/runner.rs`](../core/runner.rs) encapsulates the six +-----------+ ``` -**Six phases in order:** +### Filter modes -1. **Execute** — `cmd.output()` captures stdout + stderr -2. 
**Filter** — `filter_fn` receives stdout-only or combined, returns compressed string -3. **Print** — filtered output printed; if tee enabled, appends recovery hint on failure -4. **Stderr passthrough** — when `filter_stdout_only`: stderr printed via `eprintln!()` unconditionally -5. **Track** — `timer.track()` records raw vs filtered for token savings -6. **Exit code** — returns `Ok(exit_code)` to caller; `main.rs` calls `process::exit(code)` once +All execution goes through `core::stream::run_streaming()` with one of four `FilterMode` variants. The runner entry points (`run_filtered`, `run_streamed`, `run_passthrough`) select the appropriate mode automatically — module authors don't interact with `FilterMode` directly. + +| FilterMode | How it works | Used by | +|------------|-------------|---------| +| **`CaptureOnly`** | Buffers all stdout silently, then passes the full string to `filter_fn` post-hoc. Stderr streams to terminal in real time. | `run_filtered()` (default path) | +| **`Buffered`** | Buffers all stdout, applies filter, then prints the result. Stderr streams live. Chosen automatically by `run_filtered()` when `filter_stdout_only` is set. | `run_filtered()` (stdout-only path) | +| **`Streaming`** | Feeds each stdout line to a `StreamFilter::feed_line()` as it arrives. Emitted lines print immediately. Calls `flush()` after process exits for final output. | `run_streamed()` | +| **`Passthrough`** | Inherits the parent TTY directly — no piping, no buffering. `raw`/`filtered` are empty. 
| `run_passthrough()` | + +### When to use which + +| Scenario | Runner | FilterMode | Why | +|----------|--------|------------|-----| +| Parse structured output (JSON, tables) | `run_filtered()` | CaptureOnly/Buffered | Filter needs full text to parse structure | +| Long-running, line-parseable output | `run_streamed()` | Streaming | Low memory, real-time output | +| No filtering, just track usage | `run_passthrough()` | Passthrough | Zero overhead, inherits TTY | +| Custom logic (multi-command, file I/O) | Manual with `exec_capture()` | CaptureOnly | Full control over execution | + +### Phases + +1. **Spawn** — `run_streaming()` starts the child process with piped stdout/stderr (or inherited TTY for Passthrough) +2. **Filter** — stdout is processed per the FilterMode; stderr is forwarded to the terminal in real time via a dedicated reader thread +3. **Print** — filtered output is written to stdout (live for Streaming, post-hoc for CaptureOnly/Buffered); if tee enabled, appends recovery hint on failure +4. **Track** — `timer.track()` records raw vs filtered for token savings +5. **Exit code** — returns `Ok(exit_code)` to caller; `main.rs` calls `process::exit(code)` once **`RunOptions` builder:** @@ -96,14 +115,85 @@ pub fn run(args: &[String], verbose: u8) -> Result { Exit code handling is **fully automatic** when using `run_filtered()` — the wrapper extracts the exit code (including Unix signal handling via 128+signal), tracks savings, and returns `Ok(exit_code)`. Module authors just return the result. +**Streaming filters (line-by-line):** + +Use `runner::run_streamed()` when the command is long-running or produces unbounded output that should be filtered line-by-line. Three levels of abstraction, from simplest to most flexible: + +**Level 1: `RegexBlockFilter`** — regex start pattern + indent continuation (3-5 lines) + +For block-based errors where blocks start with a regex match and continue on indented lines. 
Handles skip prefixes, block counting, and summary automatically. + +```rust +use crate::core::stream::{BlockStreamFilter, RegexBlockFilter}; + +pub fn run(args: &[String], verbose: u8) -> Result { + let mut cmd = resolved_command("mycmd"); + for arg in args { cmd.arg(arg); } + + let filter = RegexBlockFilter::new("mycmd", r"^error\[") + .skip_prefixes(&["warning:", "note:"]); + + runner::run_streamed( + cmd, "mycmd", &args.join(" "), + Box::new(BlockStreamFilter::new(filter)), + runner::RunOptions::with_tee("mycmd"), + ) +} +``` + +`RegexBlockFilter` provides: regex-based block start detection, indent-based continuation (space/tab), configurable line skipping via prefixes, and automatic summary (`"mycmd: 3 blocks in output"` or `"mycmd: no errors found"`). + +**Level 2: `BlockHandler` trait** — custom block detection with state tracking + +When you need custom block start/continuation logic or stateful parsing beyond regex + indent. Implement the `BlockHandler` trait and wrap in `BlockStreamFilter`. + +```rust +use crate::core::stream::{BlockHandler, BlockStreamFilter}; + +struct MyHandler { error_count: usize } + +impl BlockHandler for MyHandler { + fn should_skip(&mut self, line: &str) -> bool { line.is_empty() } + fn is_block_start(&mut self, line: &str) -> bool { + if line.starts_with("FAIL") { self.error_count += 1; true } else { false } + } + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(" ") || line.starts_with("at ") + } + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + Some(format!("{} failures\n", self.error_count)) + } +} +``` + +See `cmds/rust/cargo_cmd.rs::CargoBuildHandler` and `cmds/js/tsc_cmd.rs::TscHandler` for production examples. + +**Level 3: `StreamFilter` trait** — full line-by-line control + +When block-based parsing doesn't fit (e.g., state machines, multi-phase output, line transforms). Implement `StreamFilter` directly. 
+ +```rust +use crate::core::stream::StreamFilter; + +struct MyFilter { state: State } + +impl StreamFilter for MyFilter { + fn feed_line(&mut self, line: &str) -> Option { + // Return Some(text) to emit, None to suppress + if line.contains("error") { Some(format!("{}\n", line)) } else { None } + } + fn flush(&mut self) -> String { String::new() } + fn on_exit(&mut self, exit_code: i32, raw: &str) -> Option { None } +} +``` + +See `cmds/rust/runner.rs::ErrorStreamFilter` for a complete reference implementation (state machine that tracks error blocks across lines). + **Example — passthrough command (no filtering):** ```rust pub fn run_passthrough(args: &[OsString], verbose: u8) -> Result { - let status = resolved_command("mycmd").args(args) - .stdin(Stdio::inherit()).stdout(Stdio::inherit()).stderr(Stdio::inherit()) - .status().context("Failed to run mycmd")?; - Ok(exit_code_from_status(&status, "mycmd")) + runner::run_passthrough("mycmd", args, verbose) } ``` diff --git a/src/cmds/js/tsc_cmd.rs b/src/cmds/js/tsc_cmd.rs index 20d1e7aba..6c1f23cb5 100644 --- a/src/cmds/js/tsc_cmd.rs +++ b/src/cmds/js/tsc_cmd.rs @@ -1,13 +1,13 @@ //! Filters TypeScript compiler errors, grouping them by file and error code. 
use crate::core::runner; +use crate::core::stream::{BlockHandler, BlockStreamFilter}; use crate::core::utils::{resolved_command, tool_exists, truncate}; use anyhow::Result; use regex::Regex; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; pub fn run(args: &[String], verbose: u8) -> Result { - // Try tsc directly first, fallback to npx if not found let tsc_exists = tool_exists("tsc"); let mut cmd = if tsc_exists { @@ -27,15 +27,82 @@ pub fn run(args: &[String], verbose: u8) -> Result { eprintln!("Running: {} {}", tool, args.join(" ")); } - runner::run_filtered( + runner::run_streamed( cmd, "tsc", &args.join(" "), - |raw| filter_tsc_output(raw), + Box::new(BlockStreamFilter::new(TscHandler::new())), runner::RunOptions::with_tee("tsc"), ) } +struct TscHandler { + error_count: usize, + files: HashSet, + code_counts: HashMap, +} + +impl TscHandler { + fn new() -> Self { + Self { + error_count: 0, + files: HashSet::new(), + code_counts: HashMap::new(), + } + } +} + +impl BlockHandler for TscHandler { + fn should_skip(&mut self, line: &str) -> bool { + line.starts_with("Found ") + } + + fn is_block_start(&mut self, line: &str) -> bool { + lazy_static::lazy_static! 
{ + static ref TSC_ERROR: Regex = Regex::new( + r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" + ).unwrap(); + } + if let Some(caps) = TSC_ERROR.captures(line) { + self.error_count += 1; + self.files.insert(caps[1].to_string()); + *self.code_counts.entry(caps[5].to_string()).or_insert(0) += 1; + true + } else { + false + } + } + + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(" ") || line.starts_with('\t') + } + + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + if self.error_count == 0 { + return Some("TypeScript: No errors found\n".to_string()); + } + + let mut result = format!( + "═══════════════════════════════════════\nTypeScript: {} errors in {} files\n", + self.error_count, + self.files.len() + ); + + if self.code_counts.len() > 1 { + let mut counts: Vec<_> = self.code_counts.iter().collect(); + counts.sort_by(|a, b| b.1.cmp(a.1)); + let codes_str: Vec = counts + .iter() + .take(5) + .map(|(code, count)| format!("{} ({}x)", code, count)) + .collect(); + result.push_str(&format!("Top codes: {}\n", codes_str.join(", "))); + } + + Some(result) + } +} + pub(crate) fn filter_tsc_output(output: &str) -> String { lazy_static::lazy_static! { // Pattern: src/file.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. @@ -232,4 +299,51 @@ src/app.tsx(20,5): error TS2345: Argument of type 'number' is not assignable to let result = filter_tsc_output(output); assert!(result.contains("No errors found")); } + + // --- Streaming handler tests --- + + use crate::core::stream::tests::run_block_filter; + + #[test] + fn test_tsc_stream_errors() { + let input = "\ +src/server/api/auth.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. +src/server/api/auth.ts(15,10): error TS2345: Argument of type 'number' is not assignable to parameter of type 'string'. 
+src/components/Button.tsx(8,3): error TS2339: Property 'onClick' does not exist on type 'ButtonProps'. + +Found 3 errors in 2 files. +"; + let mut f = BlockStreamFilter::new(TscHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("TS2322"), "got: {}", result); + assert!(result.contains("TS2345"), "got: {}", result); + assert!(result.contains("3 errors in 2 files"), "got: {}", result); + assert!(!result.contains("Found 3"), "got: {}", result); + } + + #[test] + fn test_tsc_stream_no_errors() { + let input = "Found 0 errors. Watching for file changes.\n"; + let mut f = BlockStreamFilter::new(TscHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!(result.contains("No errors found"), "got: {}", result); + } + + #[test] + fn test_tsc_stream_continuation_lines() { + let input = "\ +src/app.tsx(10,3): error TS2322: Type '{ children: Element; }' is not assignable to type 'Props'. + Property 'children' does not exist on type 'Props'. +src/app.tsx(20,5): error TS2345: Argument of type 'number' is not assignable. +"; + let mut f = BlockStreamFilter::new(TscHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!( + result.contains("Property 'children' does not exist"), + "got: {}", + result + ); + assert!(result.contains("TS2322"), "got: {}", result); + assert!(result.contains("TS2345"), "got: {}", result); + } } diff --git a/src/cmds/rust/cargo_cmd.rs b/src/cmds/rust/cargo_cmd.rs index 427fed76b..41fa43466 100644 --- a/src/cmds/rust/cargo_cmd.rs +++ b/src/cmds/rust/cargo_cmd.rs @@ -1,6 +1,7 @@ //! Filters cargo output — build errors, test results, clippy warnings. 
use crate::core::runner; +use crate::core::stream::{BlockHandler, BlockStreamFilter, StreamFilter}; use crate::core::utils::{resolved_command, truncate}; use anyhow::Result; use std::collections::HashMap; @@ -67,6 +68,216 @@ fn restore_double_dash_with_raw(args: &[String], raw_args: &[String]) -> Vec, +} + +impl CargoBuildHandler { + fn new() -> Self { + Self { + compiled: 0, + warnings: 0, + error_count: 0, + finished_line: None, + } + } +} + +impl BlockHandler for CargoBuildHandler { + fn should_skip(&mut self, line: &str) -> bool { + let trimmed = line.trim_start(); + if trimmed.starts_with("Compiling") || trimmed.starts_with("Checking") { + self.compiled += 1; + return true; + } + if trimmed.starts_with("Downloading") || trimmed.starts_with("Downloaded") { + return true; + } + if trimmed.starts_with("Finished") { + self.finished_line = Some(trimmed.to_string()); + return true; + } + if line.starts_with("warning:") + && line.contains("generated") + && line.contains("warning") + { + return true; + } + if (line.starts_with("error:") || line.starts_with("error[")) + && (line.contains("aborting due to") || line.contains("could not compile")) + { + return true; + } + false + } + + fn is_block_start(&mut self, line: &str) -> bool { + if line.starts_with("error[") || line.starts_with("error:") { + self.error_count += 1; + return true; + } + if line.starts_with("warning:") || line.starts_with("warning[") { + self.warnings += 1; + return true; + } + false + } + + fn is_block_continuation(&mut self, line: &str, block: &[String]) -> bool { + !(line.trim().is_empty() && block.len() > 3) + } + + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + if self.error_count == 0 && self.warnings == 0 { + let mut s = format!("cargo build ({} crates compiled)", self.compiled); + if let Some(ref finished) = self.finished_line { + s = format!("{}\n{}", s, finished); + } + Some(format!("{}\n", s)) + } else { + Some(format!( + 
"═══════════════════════════════════════\ncargo build: {} errors, {} warnings ({} crates)\n", + self.error_count, self.warnings, self.compiled + )) + } + } +} + +struct CargoTestHandler { + in_failure_section: bool, + in_failure_names: bool, + summary_lines: Vec, + has_compile_errors: bool, +} + +impl CargoTestHandler { + fn new() -> Self { + Self { + in_failure_section: false, + in_failure_names: false, + summary_lines: Vec::new(), + has_compile_errors: false, + } + } +} + +impl BlockHandler for CargoTestHandler { + fn should_skip(&mut self, line: &str) -> bool { + let trimmed = line.trim_start(); + if trimmed.starts_with("Compiling") + || trimmed.starts_with("Downloading") + || trimmed.starts_with("Downloaded") + || trimmed.starts_with("Finished") + { + return true; + } + if line.starts_with("running ") { + return true; + } + if line.starts_with("test ") && line.ends_with("... ok") { + return true; + } + // Track compile errors for fallback + if trimmed.starts_with("error[") || trimmed.starts_with("error:") { + self.has_compile_errors = true; + } + // "failures:" toggles section state + if line == "failures:" { + if self.in_failure_section { + // Second "failures:" = list of failure names — skip them + self.in_failure_names = true; + } + self.in_failure_section = true; + return true; + } + // Skip the failure name listing section + if self.in_failure_names { + if line.starts_with("test result:") { + self.in_failure_names = false; + self.in_failure_section = false; + self.summary_lines.push(line.to_string()); + return true; + } + return true; + } + if line.starts_with("test result:") { + self.summary_lines.push(line.to_string()); + self.in_failure_section = false; + return true; + } + false + } + + fn is_block_start(&mut self, line: &str) -> bool { + self.in_failure_section && line.starts_with("---- ") + } + + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + self.in_failure_section && !line.starts_with("---- ") + } + + fn 
format_summary(&self, _exit_code: i32, raw: &str) -> Option { + if self.summary_lines.is_empty() && self.has_compile_errors { + let build_filtered = filter_cargo_build(raw); + if build_filtered.starts_with("cargo build:") { + return Some(format!( + "{}\n", + build_filtered.replacen("cargo build:", "cargo test:", 1) + )); + } + // Fallback: last 5 meaningful lines + let meaningful: Vec<&str> = raw + .lines() + .filter(|l| !l.trim().is_empty() && !l.trim_start().starts_with("Compiling")) + .collect(); + let last5: Vec<&str> = meaningful.iter().rev().take(5).rev().copied().collect(); + return Some(format!("{}\n", last5.join("\n"))); + } + + // No failures emitted — aggregate pass results + let mut aggregated: Option = None; + let mut all_parsed = true; + + for line in &self.summary_lines { + if let Some(parsed) = AggregatedTestResult::parse_line(line) { + if let Some(ref mut agg) = aggregated { + agg.merge(&parsed); + } else { + aggregated = Some(parsed); + } + } else { + all_parsed = false; + break; + } + } + + if all_parsed { + if let Some(agg) = aggregated { + if agg.suites > 0 { + return Some(format!("{}\n", agg.format_compact())); + } + } + } + + // Fallback: show raw summary lines + if !self.summary_lines.is_empty() { + let mut s = String::new(); + for line in &self.summary_lines { + s.push_str(line); + s.push('\n'); + } + return Some(s); + } + + None + } +} + /// Generic cargo command runner with filtering. /// Builds the Command with restored `--` separator, then delegates to shared runner. 
fn run_cargo_filtered( @@ -99,12 +310,49 @@ where ) } +fn run_cargo_streamed( + subcommand: &str, + args: &[String], + verbose: u8, + filter: Box, +) -> Result { + let mut cmd = resolved_command("cargo"); + cmd.arg(subcommand); + + let restored_args = restore_double_dash(args); + for arg in &restored_args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: cargo {} {}", subcommand, restored_args.join(" ")); + } + + runner::run_streamed( + cmd, + &format!("cargo {}", subcommand), + &restored_args.join(" "), + filter, + runner::RunOptions::with_tee(&format!("cargo_{}", subcommand)), + ) +} + fn run_build(args: &[String], verbose: u8) -> Result { - run_cargo_filtered("build", args, verbose, filter_cargo_build) + run_cargo_streamed( + "build", + args, + verbose, + Box::new(BlockStreamFilter::new(CargoBuildHandler::new())), + ) } fn run_test(args: &[String], verbose: u8) -> Result { - run_cargo_filtered("test", args, verbose, filter_cargo_test) + run_cargo_streamed( + "test", + args, + verbose, + Box::new(BlockStreamFilter::new(CargoTestHandler::new())), + ) } fn run_clippy(args: &[String], verbose: u8) -> Result { @@ -112,7 +360,12 @@ fn run_clippy(args: &[String], verbose: u8) -> Result { } fn run_check(args: &[String], verbose: u8) -> Result { - run_cargo_filtered("check", args, verbose, filter_cargo_build) + run_cargo_streamed( + "check", + args, + verbose, + Box::new(BlockStreamFilter::new(CargoBuildHandler::new())), + ) } fn run_install(args: &[String], verbose: u8) -> Result { @@ -537,7 +790,6 @@ fn filter_cargo_nextest(output: &str) -> String { String::new() } -/// Filter cargo build/check output - strip "Compiling"/"Checking" lines, keep errors + summary fn filter_cargo_build(output: &str) -> String { let mut errors: Vec = Vec::new(); let mut warnings = 0; @@ -1788,4 +2040,122 @@ error: test run failed result ); } + + // --- Streaming handler tests --- + + use crate::core::stream::tests::run_block_filter; + + #[test] + fn 
test_cargo_build_stream_success() { + let input = " Compiling libc v0.2.153\n Compiling cfg-if v1.0.0\n Compiling rtk v0.5.0\n Finished dev [unoptimized + debuginfo] target(s) in 15.23s\n"; + let mut f = BlockStreamFilter::new(CargoBuildHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!(result.contains("3 crates compiled"), "got: {}", result); + assert!(result.contains("Finished"), "got: {}", result); + assert!(!result.contains("Compiling"), "got: {}", result); + } + + #[test] + fn test_cargo_build_stream_errors() { + let input = r#" Compiling rtk v0.5.0 +error[E0308]: mismatched types + --> src/main.rs:10:5 + | +10| "hello" + | ^^^^^^^ expected `i32`, found `&str` + +error: aborting due to 1 previous error +"#; + let mut f = BlockStreamFilter::new(CargoBuildHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("E0308"), "got: {}", result); + assert!(result.contains("mismatched types"), "got: {}", result); + assert!(result.contains("1 errors"), "got: {}", result); + assert!(!result.contains("aborting"), "got: {}", result); + } + + #[test] + fn test_cargo_test_stream_all_pass() { + let input = r#" Compiling rtk v0.5.0 + Finished test [unoptimized + debuginfo] target(s) in 2.53s + Running target/debug/deps/rtk-abc123 + +running 15 tests +test utils::tests::test_truncate_short_string ... ok +test utils::tests::test_truncate_long_string ... ok +test utils::tests::test_strip_ansi_simple ... ok + +test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.01s +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!( + result.contains("cargo test: 15 passed (1 suite, 0.01s)"), + "got: {}", + result + ); + assert!(!result.contains("Compiling"), "got: {}", result); + } + + #[test] + fn test_cargo_test_stream_failures() { + let input = r#"running 5 tests +test foo::test_a ... ok +test foo::test_b ... 
FAILED +test foo::test_c ... ok + +failures: + +---- foo::test_b stdout ---- +thread 'foo::test_b' panicked at 'assert_eq!(1, 2)' + +failures: + foo::test_b + +test result: FAILED. 4 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("test_b"), "got: {}", result); + assert!(result.contains("panicked"), "got: {}", result); + } + + #[test] + fn test_cargo_test_stream_multi_suite() { + let input = r#" Running unittests src/lib.rs (target/debug/deps/rtk-abc123) + +running 50 tests +test result: ok. 50 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.45s + + Running unittests src/main.rs (target/debug/deps/rtk-def456) + +running 30 tests +test result: ok. 30 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.30s +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!( + result.contains("cargo test: 80 passed (2 suites, 0.75s)"), + "got: {}", + result + ); + } + + #[test] + fn test_cargo_test_stream_compile_error() { + let input = r#" Compiling rtk v0.31.0 (/workspace/projects/rtk) +error[E0425]: cannot find value `missing_symbol` in this scope + --> tests/repro_compile_fail.rs:3:13 + | +3 | let _ = missing_symbol; + | ^^^^^^^^^^^^^^ not found in this scope + +For more information about this error, try `rustc --explain E0425`. 
+error: could not compile `rtk` (test "repro_compile_fail") due to 1 previous error +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("cargo test:"), "got: {}", result); + assert!(result.contains("1 errors"), "got: {}", result); + } } diff --git a/src/cmds/rust/runner.rs b/src/cmds/rust/runner.rs index 4a80f2fb6..51b2ba790 100644 --- a/src/cmds/rust/runner.rs +++ b/src/cmds/rust/runner.rs @@ -1,69 +1,105 @@ //! Runs arbitrary commands and captures only stderr or test failures. -use crate::core::stream::{self, FilterMode, StdinMode}; -use crate::core::tracking; -use anyhow::{Context, Result}; +use crate::core::stream::StreamFilter; +use anyhow::Result; +use lazy_static::lazy_static; use regex::Regex; use std::process::Command; -/// Run a command and filter output to show only errors/warnings -pub fn run_err(command: &str, verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); +lazy_static! 
{ + static ref ERROR_PATTERNS: Vec = vec![ + // Generic errors + Regex::new(r"(?i)^.*error[\s:\[].*$").unwrap(), + Regex::new(r"(?i)^.*\berr\b.*$").unwrap(), + Regex::new(r"(?i)^.*warning[\s:\[].*$").unwrap(), + Regex::new(r"(?i)^.*\bwarn\b.*$").unwrap(), + Regex::new(r"(?i)^.*failed.*$").unwrap(), + Regex::new(r"(?i)^.*failure.*$").unwrap(), + Regex::new(r"(?i)^.*exception.*$").unwrap(), + Regex::new(r"(?i)^.*panic.*$").unwrap(), + // Rust specific + Regex::new(r"^error\[E\d+\]:.*$").unwrap(), + Regex::new(r"^\s*--> .*:\d+:\d+$").unwrap(), + // Python + Regex::new(r"^Traceback.*$").unwrap(), + Regex::new(r#"^\s*File ".*", line \d+.*$"#).unwrap(), + // JavaScript/TypeScript + Regex::new(r"^\s*at .*:\d+:\d+.*$").unwrap(), + // Go + Regex::new(r"^.*\.go:\d+:.*$").unwrap(), + ]; +} - if verbose > 0 { - eprintln!("Running: {}", command); - } +struct ErrorStreamFilter { + in_error_block: bool, + blank_count: usize, + emitted_any: bool, +} - let mut cmd = if cfg!(target_os = "windows") { - let mut c = Command::new("cmd"); - c.args(["/C", command]); - c - } else { - let mut c = Command::new("sh"); - c.args(["-c", command]); - c - }; +impl ErrorStreamFilter { + fn new() -> Self { + Self { + in_error_block: false, + blank_count: 0, + emitted_any: false, + } + } +} - let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) - .context("Failed to execute command")?; +impl StreamFilter for ErrorStreamFilter { + fn feed_line(&mut self, line: &str) -> Option { + let is_error = ERROR_PATTERNS.iter().any(|p| p.is_match(line)); + if is_error { + self.in_error_block = true; + self.blank_count = 0; + self.emitted_any = true; + Some(format!("{}\n", line)) + } else if self.in_error_block { + if line.trim().is_empty() { + self.blank_count += 1; + if self.blank_count >= 2 { + self.in_error_block = false; + None + } else { + self.emitted_any = true; + Some(format!("{}\n", line)) + } + } else if line.starts_with(' ') || line.starts_with('\t') { + 
self.blank_count = 0; + self.emitted_any = true; + Some(format!("{}\n", line)) + } else { + self.in_error_block = false; + None + } + } else { + None + } + } - let raw = &result.raw; - let exit_code = result.exit_code; - let filtered = filter_errors(raw); - let mut rtk = String::new(); + fn flush(&mut self) -> String { + String::new() + } - if filtered.is_empty() { + fn on_exit(&mut self, exit_code: i32, raw: &str) -> Option { + if self.emitted_any { + return None; + } if exit_code == 0 { - rtk.push_str("[ok] Command completed successfully (no errors)"); + Some("[ok] Command completed successfully (no errors)".to_string()) } else { - rtk.push_str(&format!("[FAIL] Command failed (exit code: {})\n", exit_code)); + let mut msg = format!("[FAIL] Command failed (exit code: {})\n", exit_code); let lines: Vec<&str> = raw.lines().collect(); for line in lines.iter().rev().take(10).rev() { - rtk.push_str(&format!(" {}\n", line)); + msg.push_str(&format!(" {}\n", line)); } + Some(msg) } - } else { - rtk.push_str(&filtered); - } - - if let Some(hint) = crate::core::tee::tee_and_hint(raw, "err", exit_code) { - println!("{}\n{}", rtk, hint); - } else { - println!("{}", rtk); } - timer.track(command, "rtk run-err", raw, &rtk); - Ok(exit_code) } -/// Run tests and show only failures -pub fn run_test(command: &str, verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); - - if verbose > 0 { - eprintln!("Running tests: {}", command); - } - - let mut cmd = if cfg!(target_os = "windows") { +fn build_shell_command(command: &str) -> Command { + if cfg!(target_os = "windows") { let mut c = Command::new("cmd"); c.args(["/C", command]); c @@ -71,48 +107,42 @@ pub fn run_test(command: &str, verbose: u8) -> Result { let mut c = Command::new("sh"); c.args(["-c", command]); c - }; - - let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) - .context("Failed to execute test command")?; + } +} - let raw = &result.raw; - let exit_code = 
result.exit_code; - let summary = extract_test_summary(raw, command); - if let Some(hint) = crate::core::tee::tee_and_hint(raw, "test", exit_code) { - println!("{}\n{}", summary, hint); - } else { - println!("{}", summary); +/// Run a command and filter output to show only errors/warnings +pub fn run_err(command: &str, verbose: u8) -> Result { + if verbose > 0 { + eprintln!("Running: {}", command); } - timer.track(command, "rtk run-test", raw, &summary); - Ok(exit_code) + let cmd = build_shell_command(command); + crate::core::runner::run_streamed( + cmd, + "err", + command, + Box::new(ErrorStreamFilter::new()), + crate::core::runner::RunOptions::with_tee("err"), + ) } -fn filter_errors(output: &str) -> String { - lazy_static::lazy_static! { - static ref ERROR_PATTERNS: Vec = vec![ - // Generic errors - Regex::new(r"(?i)^.*error[\s:\[].*$").unwrap(), - Regex::new(r"(?i)^.*\berr\b.*$").unwrap(), - Regex::new(r"(?i)^.*warning[\s:\[].*$").unwrap(), - Regex::new(r"(?i)^.*\bwarn\b.*$").unwrap(), - Regex::new(r"(?i)^.*failed.*$").unwrap(), - Regex::new(r"(?i)^.*failure.*$").unwrap(), - Regex::new(r"(?i)^.*exception.*$").unwrap(), - Regex::new(r"(?i)^.*panic.*$").unwrap(), - // Rust specific - Regex::new(r"^error\[E\d+\]:.*$").unwrap(), - Regex::new(r"^\s*--> .*:\d+:\d+$").unwrap(), - // Python - Regex::new(r"^Traceback.*$").unwrap(), - Regex::new(r#"^\s*File ".*", line \d+.*$"#).unwrap(), - // JavaScript/TypeScript - Regex::new(r"^\s*at .*:\d+:\d+.*$").unwrap(), - // Go - Regex::new(r"^.*\.go:\d+:.*$").unwrap(), - ]; +/// Run tests and show only failures +pub fn run_test(command: &str, verbose: u8) -> Result { + if verbose > 0 { + eprintln!("Running tests: {}", command); } + let cmd = build_shell_command(command); + let command_owned = command.to_string(); + crate::core::runner::run_filtered( + cmd, + "test", + command, + move |raw| extract_test_summary(raw, &command_owned), + crate::core::runner::RunOptions::with_tee("test"), + ) +} +#[cfg(test)] +fn 
filter_errors(output: &str) -> String { let mut result = Vec::new(); let mut in_error_block = false; let mut blank_count = 0; @@ -133,7 +163,6 @@ fn filter_errors(output: &str) -> String { result.push(line.to_string()); } } else if line.starts_with(' ') || line.starts_with('\t') { - // Continuation of error result.push(line.to_string()); blank_count = 0; } else { @@ -149,20 +178,17 @@ fn extract_test_summary(output: &str, command: &str) -> String { let mut result = Vec::new(); let lines: Vec<&str> = output.lines().collect(); - // Detect test framework let is_cargo = command.contains("cargo test"); let is_pytest = command.contains("pytest"); let is_jest = command.contains("jest") || command.contains("npm test") || command.contains("yarn test"); let is_go = command.contains("go test"); - // Collect failures let mut failures = Vec::new(); let mut in_failure = false; let mut failure_lines = Vec::new(); for line in lines.iter() { - // Cargo test if is_cargo { if line.contains("test result:") { result.push(line.to_string()); @@ -178,7 +204,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Pytest if is_pytest { if line.contains(" passed") || line.contains(" failed") || line.contains(" error") { result.push(line.to_string()); @@ -188,7 +213,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Jest if is_jest { if line.contains("Tests:") || line.contains("Test Suites:") { result.push(line.to_string()); @@ -198,7 +222,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Go test if is_go { if line.starts_with("ok") || line.starts_with("FAIL") || line.starts_with("---") { result.push(line.to_string()); @@ -209,7 +232,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Build output let mut output = String::new(); if !failures.is_empty() { @@ -229,7 +251,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { output.push_str(&format!(" {}\n", r)); } } else { 
- // Fallback: show last few lines output.push_str("OUTPUT (last 5 lines):\n"); let start = lines.len().saturating_sub(5); for line in &lines[start..] { diff --git a/src/core/runner.rs b/src/core/runner.rs index 02ce9d41e..cb406da40 100644 --- a/src/core/runner.rs +++ b/src/core/runner.rs @@ -3,9 +3,8 @@ use anyhow::{Context, Result}; use std::process::Command; -use crate::core::stream::{self, FilterMode, StdinMode}; +use crate::core::stream::{self, FilterMode, StdinMode, StreamFilter}; use crate::core::tracking; -use crate::core::utils::exit_code_from_status; pub fn print_with_hint(filtered: &str, raw: &str, tee_label: &str, exit_code: i32) { if let Some(hint) = crate::core::tee::tee_and_hint(raw, tee_label, exit_code) { @@ -54,80 +53,143 @@ impl<'a> RunOptions<'a> { } } -pub fn run_filtered( +pub enum RunMode<'a> { + Filtered(Box String + 'a>), + Streamed(Box), + Passthrough, +} + +pub fn run( mut cmd: Command, tool_name: &str, args_display: &str, + mode: RunMode<'_>, + opts: RunOptions<'_>, +) -> Result { + let timer = tracking::TimedExecution::start(); + let cmd_label = format!("{} {}", tool_name, args_display); + + match mode { + RunMode::Filtered(filter_fn) => { + let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) + .with_context(|| format!("Failed to run {}", tool_name))?; + + let exit_code = result.exit_code; + let raw = &result.raw; + let raw_stdout = &result.raw_stdout; + + if opts.skip_filter_on_failure && exit_code != 0 { + timer.track(&cmd_label, &format!("rtk {}", cmd_label), raw, raw); + return Ok(exit_code); + } + + let text_to_filter = if opts.filter_stdout_only { + raw_stdout + } else { + raw + }; + let filtered = filter_fn(text_to_filter); + + if let Some(label) = opts.tee_label { + print_with_hint(&filtered, raw, label, exit_code); + } else if opts.no_trailing_newline { + print!("{}", filtered); + } else { + println!("{}", filtered); + } + + let raw_for_tracking = if opts.filter_stdout_only { + raw_stdout + } 
else { + raw + }; + timer.track( + &cmd_label, + &format!("rtk {}", cmd_label), + raw_for_tracking, + &filtered, + ); + Ok(exit_code) + } + RunMode::Streamed(filter) => { + let result = + stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::Streaming(filter)) + .with_context(|| format!("Failed to run {}", tool_name))?; + + if let Some(label) = opts.tee_label { + if let Some(hint) = + crate::core::tee::tee_and_hint(&result.raw, label, result.exit_code) + { + println!("{}", hint); + } + } + + timer.track( + &cmd_label, + &format!("rtk {}", cmd_label), + &result.raw, + &result.filtered, + ); + Ok(result.exit_code) + } + RunMode::Passthrough => { + let result = + stream::run_streaming(&mut cmd, StdinMode::Inherit, FilterMode::Passthrough) + .with_context(|| format!("Failed to run {}", tool_name))?; + + timer.track_passthrough(&cmd_label, &format!("rtk {} (passthrough)", cmd_label)); + Ok(result.exit_code) + } + } +} + +pub fn run_filtered( + cmd: Command, + tool_name: &str, + args_display: &str, filter_fn: F, opts: RunOptions<'_>, ) -> Result where F: Fn(&str) -> String, { - let timer = tracking::TimedExecution::start(); - - // CaptureOnly: stderr streams live, stdout buffered silently. 
- // result.filtered = raw_stdout, result.raw = stdout + stderr - let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) - .with_context(|| format!("Failed to run {}", tool_name))?; - - let exit_code = result.exit_code; - let raw_stdout = &result.filtered; - let raw = &result.raw; - - if opts.skip_filter_on_failure && exit_code != 0 { - timer.track( - &format!("{} {}", tool_name, args_display), - &format!("rtk {} {}", tool_name, args_display), - raw, - raw, - ); - return Ok(exit_code); - } - - let text_to_filter = if opts.filter_stdout_only { - raw_stdout - } else { - raw - }; - let filtered = filter_fn(text_to_filter); - - if let Some(label) = opts.tee_label { - print_with_hint(&filtered, raw, label, exit_code); - } else if opts.no_trailing_newline { - print!("{}", filtered); - } else { - println!("{}", filtered); - } - - let raw_for_tracking = if opts.filter_stdout_only { - raw_stdout - } else { - raw - }; - timer.track( - &format!("{} {}", tool_name, args_display), - &format!("rtk {} {}", tool_name, args_display), - raw_for_tracking, - &filtered, - ); - - Ok(exit_code) + run( + cmd, + tool_name, + args_display, + RunMode::Filtered(Box::new(filter_fn)), + opts, + ) } pub fn run_passthrough(tool: &str, args: &[std::ffi::OsString], verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); if verbose > 0 { eprintln!("{} passthrough: {:?}", tool, args); } - let status = crate::core::utils::resolved_command(tool) - .args(args) - .status() - .with_context(|| format!("Failed to run {}", tool))?; + let mut cmd = crate::core::utils::resolved_command(tool); + cmd.args(args); let args_str = tracking::args_display(args); - timer.track_passthrough( - &format!("{} {}", tool, args_str), - &format!("rtk {} {} (passthrough)", tool, args_str), - ); - Ok(exit_code_from_status(&status, tool)) + run( + cmd, + tool, + &args_str, + RunMode::Passthrough, + RunOptions::default(), + ) +} + +pub fn run_streamed( + cmd: Command, + tool_name: 
&str, + args_display: &str, + filter: Box, + opts: RunOptions<'_>, +) -> Result { + run( + cmd, + tool_name, + args_display, + RunMode::Streamed(filter), + opts, + ) } diff --git a/src/core/stream.rs b/src/core/stream.rs index b9137ac44..7e0dcba15 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -1,10 +1,149 @@ use anyhow::{Context, Result}; +use regex::Regex; use std::io::{self, BufRead, BufReader, BufWriter, Write}; use std::process::{Command, Stdio}; pub trait StreamFilter { fn feed_line(&mut self, line: &str) -> Option; fn flush(&mut self) -> String; + fn on_exit(&mut self, _exit_code: i32, _raw: &str) -> Option { + None + } +} + +pub trait BlockHandler { + fn should_skip(&mut self, line: &str) -> bool; + fn is_block_start(&mut self, line: &str) -> bool; + fn is_block_continuation(&mut self, line: &str, block: &[String]) -> bool; + fn format_summary(&self, exit_code: i32, raw: &str) -> Option; +} + +pub struct BlockStreamFilter { + handler: H, + in_block: bool, + current_block: Vec, + blocks_emitted: usize, +} + +impl BlockStreamFilter { + pub fn new(handler: H) -> Self { + Self { + handler, + in_block: false, + current_block: Vec::new(), + blocks_emitted: 0, + } + } + + fn emit_block(&mut self) -> Option { + if self.current_block.is_empty() { + return None; + } + let block = self.current_block.join("\n"); + self.current_block.clear(); + self.blocks_emitted += 1; + Some(format!("{}\n", block)) + } +} + +impl StreamFilter for BlockStreamFilter { + fn feed_line(&mut self, line: &str) -> Option { + if self.handler.should_skip(line) { + return None; + } + + if self.handler.is_block_start(line) { + let prev = self.emit_block(); + self.current_block.push(line.to_string()); + self.in_block = true; + prev + } else if self.in_block { + if self + .handler + .is_block_continuation(line, &self.current_block) + { + self.current_block.push(line.to_string()); + None + } else { + self.in_block = false; + self.emit_block() + } + } else { + None + } + } + + fn 
flush(&mut self) -> String { + self.emit_block().unwrap_or_default() + } + + fn on_exit(&mut self, exit_code: i32, raw: &str) -> Option { + self.handler.format_summary(exit_code, raw) + } +} + +#[allow(dead_code)] // available for command modules; currently used in tests only +pub struct RegexBlockFilter { + start_re: Regex, + skip_prefixes: Vec, + tool_name: String, + block_count: usize, +} + +impl RegexBlockFilter { + pub fn new(tool_name: &str, start_pattern: &str) -> Self { + Self { + start_re: Regex::new(start_pattern).unwrap_or_else(|e| { + panic!("RegexBlockFilter: bad pattern '{}': {}", start_pattern, e) + }), + skip_prefixes: Vec::new(), + tool_name: tool_name.to_string(), + block_count: 0, + } + } + + #[allow(dead_code)] + pub fn skip_prefix(mut self, prefix: &str) -> Self { + self.skip_prefixes.push(prefix.to_string()); + self + } + + #[allow(dead_code)] + pub fn skip_prefixes(mut self, prefixes: &[&str]) -> Self { + self.skip_prefixes + .extend(prefixes.iter().map(|s| s.to_string())); + self + } +} + +impl BlockHandler for RegexBlockFilter { + fn should_skip(&mut self, line: &str) -> bool { + self.skip_prefixes.iter().any(|p| line.starts_with(p)) + } + + fn is_block_start(&mut self, line: &str) -> bool { + if self.start_re.is_match(line) { + self.block_count += 1; + true + } else { + false + } + } + + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(' ') || line.starts_with('\t') + } + + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + if self.block_count == 0 { + Some(format!("{}: no errors found\n", self.tool_name)) + } else { + Some(format!( + "{}: {} blocks in output\n", + self.tool_name, self.block_count + )) + } + } } pub trait StdinFilter: Send { @@ -12,10 +151,12 @@ pub trait StdinFilter: Send { fn flush(&mut self) -> String; } +#[allow(dead_code)] // test utility: wraps closures as StreamFilter pub struct LineFilter Option> { f: F, } +#[allow(dead_code)] impl Option> LineFilter { 
pub fn new(f: F) -> Self { Self { f } @@ -32,15 +173,16 @@ impl Option> StreamFilter for LineFilter { } } -pub enum FilterMode { - Streaming(Box), - Buffered(fn(&str) -> String), +pub enum FilterMode<'a> { + Streaming(Box), + Buffered(Box String + 'a>), CaptureOnly, Passthrough, } pub enum StdinMode { Inherit, + #[allow(dead_code)] // future API: stdin filtering for interactive commands Filter(Box), Null, } @@ -48,6 +190,7 @@ pub enum StdinMode { pub struct StreamResult { pub exit_code: i32, pub raw: String, + pub raw_stdout: String, pub filtered: String, } @@ -76,8 +219,28 @@ pub fn status_to_exit_code(status: std::process::ExitStatus) -> i32 { pub fn run_streaming( cmd: &mut Command, stdin_mode: StdinMode, - stdout_mode: FilterMode, + stdout_mode: FilterMode<'_>, ) -> Result { + if matches!(stdout_mode, FilterMode::Passthrough) { + match &stdin_mode { + StdinMode::Inherit => { + cmd.stdin(Stdio::inherit()); + } + _ => { + cmd.stdin(Stdio::null()); + } + }; + cmd.stdout(Stdio::inherit()); + cmd.stderr(Stdio::inherit()); + let status = cmd.status().context("Failed to spawn process")?; + return Ok(StreamResult { + exit_code: status_to_exit_code(status), + raw: String::new(), + raw_stdout: String::new(), + filtered: String::new(), + }); + } + match &stdin_mode { StdinMode::Inherit => { cmd.stdin(Stdio::inherit()); @@ -96,6 +259,8 @@ pub fn run_streaming( } } + let live_stderr = matches!(stdout_mode, FilterMode::Streaming(_)); + let mut child = ChildGuard(cmd.spawn().context("Failed to spawn process")?); let stdin_thread: Option> = match stdin_mode { @@ -130,45 +295,44 @@ pub fn run_streaming( let stderr = child.0.stderr.take().context("No child stderr handle")?; let stderr_thread = std::thread::spawn(move || -> String { let mut raw_err = String::new(); - let stderr_out = io::stderr(); - let mut err_out = stderr_out.lock(); - for line in BufReader::new(stderr).lines().map_while(Result::ok) { - writeln!(err_out, "{}", line).ok(); - raw_err.push_str(&line); - 
raw_err.push('\n'); + if live_stderr { + let stderr_out = io::stderr(); + let mut err_out = stderr_out.lock(); + for line in BufReader::new(stderr).lines().map_while(Result::ok) { + writeln!(err_out, "{}", line).ok(); + raw_err.push_str(&line); + raw_err.push('\n'); + } + } else { + for line in BufReader::new(stderr).lines().map_while(Result::ok) { + raw_err.push_str(&line); + raw_err.push('\n'); + } } raw_err }); let stdout = child.0.stdout.take().context("No child stdout handle")?; - const RAW_CAP: usize = 1_048_576; + const RAW_CAP: usize = 10_485_760; let mut raw_stdout = String::new(); let mut filtered = String::new(); + let mut capped = false; + let mut saved_filter: Option> = None; { let stdout_handle = io::stdout(); let mut out = stdout_handle.lock(); match stdout_mode { - FilterMode::Passthrough => { - for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { - raw_stdout.push_str(&line); - raw_stdout.push('\n'); - } - match writeln!(out, "{}", line) { - Err(e) if e.kind() == io::ErrorKind::BrokenPipe => break, - Err(e) => return Err(e.into()), - Ok(_) => {} - } - } - filtered = raw_stdout.clone(); - } + FilterMode::Passthrough => unreachable!("handled by early-return above"), FilterMode::Streaming(mut filter) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + if raw_stdout.len() + line.len() < RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } if let Some(output) = filter.feed_line(&line) { filtered.push_str(&output); @@ -186,12 +350,16 @@ pub fn run_streaming( Err(e) => return Err(e.into()), Ok(_) => {} } + saved_filter = Some(filter); } FilterMode::Buffered(filter_fn) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + 
if raw_stdout.len() + line.len() < RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } } filtered = filter_fn(&raw_stdout); @@ -203,9 +371,12 @@ pub fn run_streaming( } FilterMode::CaptureOnly => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + if raw_stdout.len() + line.len() < RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } } filtered = raw_stdout.clone(); @@ -222,10 +393,20 @@ pub fn run_streaming( } let status = child.0.wait().context("Failed to wait for child")?; + let exit_code = status_to_exit_code(status); + let raw = format!("{}{}", raw_stdout, raw_stderr); + + if let Some(mut f) = saved_filter { + if let Some(post) = f.on_exit(exit_code, &raw) { + filtered.push_str(&post); + print!("{}", post); + } + } Ok(StreamResult { - exit_code: status_to_exit_code(status), - raw: format!("{}{}", raw_stdout, raw_stderr), + exit_code, + raw, + raw_stdout, filtered, }) } @@ -257,7 +438,7 @@ pub fn exec_capture(cmd: &mut Command) -> Result { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use super::*; use std::process::Command; @@ -312,6 +493,7 @@ mod tests { let r = StreamResult { exit_code: 0, raw: String::new(), + raw_stdout: String::new(), filtered: String::new(), }; assert!(r.success()); @@ -322,6 +504,7 @@ mod tests { let r = StreamResult { exit_code: 1, raw: String::new(), + raw_stdout: String::new(), filtered: String::new(), }; assert!(!r.success()); @@ -332,6 +515,7 @@ mod tests { let r = StreamResult { exit_code: 137, raw: String::new(), + raw_stdout: String::new(), filtered: String::new(), }; assert!(!r.success()); @@ -343,7 +527,8 @@ mod tests { cmd.arg("hello"); let result = run_streaming(&mut cmd, StdinMode::Null, 
FilterMode::Passthrough).unwrap(); assert_eq!(result.exit_code, 0); - assert!(result.raw.contains("hello")); + // Passthrough inherits TTY — raw/filtered are empty + assert!(result.raw.is_empty()); } #[test] @@ -399,27 +584,33 @@ mod tests { fn test_run_streaming_buffered_filter() { let mut cmd = Command::new("printf"); cmd.arg("line1\nline2\nline3\n"); - fn upper(s: &str) -> String { - s.to_uppercase() - } - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Buffered(upper)).unwrap(); + let result = run_streaming( + &mut cmd, + StdinMode::Null, + FilterMode::Buffered(Box::new(|s: &str| s.to_uppercase())), + ) + .unwrap(); assert!(result.filtered.contains("LINE1")); assert!(result.filtered.contains("LINE2")); assert_eq!(result.exit_code, 0); } #[test] - fn test_run_streaming_raw_cap_at_1mb() { + fn test_run_streaming_raw_cap_at_10mb() { let mut cmd = Command::new("sh"); - cmd.args(["-c", "yes | head -600000"]); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + // ~11 MiB of 80-char lines (fast: fewer lines than `yes | head -6M`) + cmd.args([ + "-c", + "dd if=/dev/zero bs=1024 count=11264 2>/dev/null | tr '\\0' 'a' | fold -w 80", + ]); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); assert!( - result.raw.len() <= 1_048_576 + 100, - "raw should be capped at ~1 MiB, got {} bytes", + result.raw.len() <= 10_485_760 + 100, + "raw should be capped at ~10 MiB, got {} bytes", result.raw.len() ); assert!( - result.raw.len() > 100_000, + result.raw.len() > 1_000_000, "Should have captured significant data" ); } @@ -427,7 +618,7 @@ mod tests { #[test] fn test_child_guard_prevents_zombie() { let mut cmd = Command::new("true"); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly); assert!(result.is_ok()); assert_eq!(result.unwrap().exit_code, 0); } @@ -443,16 +634,16 @@ 
mod tests { fn test_run_streaming_raw_contains_stdout() { let mut cmd = Command::new("echo"); cmd.arg("test_output_xyz"); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); assert!(result.raw.contains("test_output_xyz")); } #[test] - fn test_run_streaming_filtered_equals_raw_in_passthrough() { + fn test_run_streaming_capture_only_filtered_equals_raw() { let mut cmd = Command::new("echo"); cmd.arg("check_equality"); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); - assert_eq!(result.filtered.trim(), result.raw.trim()); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); + assert_eq!(result.filtered.trim(), result.raw_stdout.trim()); } #[test] @@ -500,4 +691,129 @@ mod tests { }; assert_eq!(r.combined(), ""); } + + pub fn run_block_filter(filter: &mut dyn StreamFilter, input: &str, exit_code: i32) -> String { + let mut output = String::new(); + for line in input.lines() { + if let Some(s) = filter.feed_line(line) { + output.push_str(&s); + } + } + output.push_str(&filter.flush()); + if let Some(post) = filter.on_exit(exit_code, input) { + output.push_str(&post); + } + output + } + + struct TestHandler; + + impl BlockHandler for TestHandler { + fn should_skip(&mut self, line: &str) -> bool { + line.starts_with("SKIP") + } + fn is_block_start(&mut self, line: &str) -> bool { + line.starts_with("ERROR") + } + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(" ") + } + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + Some("DONE\n".to_string()) + } + } + + #[test] + fn test_block_filter_emits_blocks() { + let mut f = BlockStreamFilter::new(TestHandler); + let input = "SKIP noise\nERROR first\n detail1\nnon-block\nERROR second\n detail2\n"; + let result = run_block_filter(&mut f, input, 0); + 
assert!(result.contains("ERROR first\n detail1"), "got: {}", result); + assert!( + result.contains("ERROR second\n detail2"), + "got: {}", + result + ); + assert!(!result.contains("SKIP"), "got: {}", result); + assert!(result.ends_with("DONE\n"), "got: {}", result); + } + + #[test] + fn test_block_filter_no_blocks() { + let mut f = BlockStreamFilter::new(TestHandler); + let result = run_block_filter(&mut f, "nothing here\njust text\n", 0); + assert_eq!(result, "DONE\n"); + } + + #[test] + fn test_regex_block_filter_emits_blocks() { + let handler = RegexBlockFilter::new("test", r"^error\["); + let mut f = BlockStreamFilter::new(handler); + let input = "ok line\nerror[E0308]: mismatched types\n expected `u32`\nok again\nerror[E0599]: no method\n help: try\n"; + let result = run_block_filter(&mut f, input, 1); + assert!( + result.contains("error[E0308]: mismatched types\n expected `u32`"), + "got: {}", + result + ); + assert!( + result.contains("error[E0599]: no method\n help: try"), + "got: {}", + result + ); + assert!( + result.contains("test: 2 blocks in output"), + "got: {}", + result + ); + } + + #[test] + fn test_regex_block_filter_skip_prefix() { + let handler = RegexBlockFilter::new("test", r"^error").skip_prefix("warning:"); + let mut f = BlockStreamFilter::new(handler); + let input = "warning: unused var\nerror: bad type\n detail\nwarning: dead code\n"; + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("error: bad type"), "got: {}", result); + assert!(!result.contains("warning:"), "got: {}", result); + } + + #[test] + fn test_regex_block_filter_no_blocks() { + let handler = RegexBlockFilter::new("mytest", r"^FAIL"); + let mut f = BlockStreamFilter::new(handler); + let result = run_block_filter(&mut f, "all passed\nok\n", 0); + assert_eq!(result, "mytest: no errors found\n"); + } + + #[test] + fn test_regex_block_filter_indent_continuation() { + let handler = RegexBlockFilter::new("test", r"^ERR"); + let mut f = 
BlockStreamFilter::new(handler); + let input = "ERR space indent\n two spaces\n\ttab indent\nnon-indent\n"; + let result = run_block_filter(&mut f, input, 1); + assert!( + result.contains("ERR space indent\n two spaces\n\ttab indent"), + "got: {}", + result + ); + assert!(!result.contains("non-indent"), "got: {}", result); + } + + #[test] + fn test_regex_block_filter_multiple_skip_prefixes() { + let handler = + RegexBlockFilter::new("test", r"^error").skip_prefixes(&["note:", "warning:", "help:"]); + let mut f = BlockStreamFilter::new(handler); + let input = "note: see docs\nwarning: unused\nhelp: try this\nerror: fatal\n details\n"; + let result = run_block_filter(&mut f, input, 1); + assert!(!result.contains("note:"), "got: {}", result); + assert!(!result.contains("warning:"), "got: {}", result); + assert!(!result.contains("help:"), "got: {}", result); + assert!( + result.contains("error: fatal\n details"), + "got: {}", + result + ); + } } diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 4a295e256..8f21016c0 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -347,7 +347,8 @@ fn strip_trailing_redirects(cmd: &str) -> (&str, &str) { /// Returns `None` if the command is unsupported or ignored (hook should pass through). /// /// Handles compound commands (`&&`, `||`, `;`) by rewriting each segment independently. -/// For pipes (`|`), only rewrites the first command (the filter stays raw). +/// For pipes (`|`), only rewrites the left-hand command (pipe targets stay raw), +/// but continues rewriting segments after subsequent `&&`/`||`/`;` operators. 
pub fn rewrite_command(cmd: &str, excluded: &[String]) -> Option { let trimmed = cmd.trim(); if trimmed.is_empty() { @@ -381,6 +382,9 @@ fn rewrite_compound(cmd: &str, excluded: &[String]) -> Option { let mut seg_start: usize = 0; for tok in &tokens { + if tok.offset < seg_start { + continue; + } match tok.kind { TokenKind::Operator => { let seg = cmd[seg_start..tok.offset].trim(); @@ -420,9 +424,25 @@ fn rewrite_compound(cmd: &str, excluded: &[String]) -> Option { any_changed = true; } result.push_str(&rewritten); - result.push(' '); - result.push_str(cmd[tok.offset..].trim_start()); - return if any_changed { Some(result) } else { None }; + + let pipe_group_end = tokens.iter().find(|t| { + t.offset > tok.offset + && (t.kind == TokenKind::Operator + || (t.kind == TokenKind::Shellism && t.value == "&")) + }); + + match pipe_group_end { + Some(next_op) => { + result.push(' '); + result.push_str(cmd[tok.offset..next_op.offset].trim()); + seg_start = next_op.offset; + } + None => { + result.push(' '); + result.push_str(cmd[tok.offset..].trim_start()); + return if any_changed { Some(result) } else { None }; + } + } } TokenKind::Shellism if tok.value == "&" => { let seg = cmd[seg_start..tok.offset].trim(); @@ -2507,4 +2527,57 @@ mod tests { Classification::Ignored ); } + + // --- Pipe + operator rewrite --- + + #[test] + fn test_rewrite_pipe_then_and() { + assert_eq!( + rewrite_command("git log | head -5 && git stash", &[]), + Some("rtk git log | head -5 && rtk git stash".into()) + ); + } + + #[test] + fn test_rewrite_pipe_then_semicolon() { + assert_eq!( + rewrite_command("cargo test | head; git status", &[]), + Some("rtk cargo test | head; rtk git status".into()) + ); + } + + #[test] + fn test_rewrite_pipe_then_or() { + assert_eq!( + rewrite_command("cargo test | grep FAIL || git stash", &[]), + Some("rtk cargo test | grep FAIL || rtk git stash".into()) + ); + } + + #[test] + fn test_rewrite_env_pipe_then_and() { + assert_eq!( + rewrite_command( + "RUST_BACKTRACE=1 
cargo test 2>&1 | grep FAILED && git stash", + &[] + ), + Some("RUST_BACKTRACE=1 rtk cargo test 2>&1 | grep FAILED && rtk git stash".into()) + ); + } + + #[test] + fn test_rewrite_and_then_pipe() { + assert_eq!( + rewrite_command("git status && cargo test | grep FAIL", &[]), + Some("rtk git status && rtk cargo test | grep FAIL".into()) + ); + } + + #[test] + fn test_rewrite_multi_pipe_then_and() { + assert_eq!( + rewrite_command("git log | head | tail && git status", &[]), + Some("rtk git log | head | tail && rtk git status".into()) + ); + } } diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index d270673ab..844875853 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -285,7 +285,10 @@ pub fn run_claude() -> Result<()> { let v: Value = match serde_json::from_str(input) { Ok(v) => v, - Err(_) => return Ok(()), + Err(e) => { + let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); + return Ok(()); + } }; let cmd = match v diff --git a/src/main.rs b/src/main.rs index 220031bf6..5c6313289 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2028,7 +2028,6 @@ fn run_cli() -> Result { if raw.trim().is_empty() { 0 } else { - // Execute via shell passthrough with token tracking use std::process::Command as ProcCommand; let shell = if cfg!(windows) { "cmd" } else { "sh" }; let flag = if cfg!(windows) { "/C" } else { "-c" }; @@ -2079,8 +2078,30 @@ fn run_cli() -> Result { eprintln!("Proxy mode: {} {}", cmd_name, cmd_args.join(" ")); } - // ISSUE #897: ChildGuard kills child on error/panic to prevent - // orphan processes that caused a 514GB memory leak + kernel panic. + // ISSUE #897: Kill proxy child on SIGINT/SIGTERM to prevent orphan + // processes. Drop-based ChildGuard doesn't run on signals with + // panic=abort, so we register a signal handler that kills the child + // PID stored in this atomic. 
+ use std::sync::atomic::{AtomicU32, Ordering}; + static PROXY_CHILD_PID: AtomicU32 = AtomicU32::new(0); + + #[cfg(unix)] + { + unsafe extern "C" fn handle_signal(sig: libc::c_int) { + let pid = PROXY_CHILD_PID.load(Ordering::SeqCst); + if pid != 0 { + libc::kill(pid as libc::pid_t, libc::SIGTERM); + libc::waitpid(pid as libc::pid_t, std::ptr::null_mut(), 0); + } + libc::signal(sig, libc::SIG_DFL); + libc::raise(sig); + } + unsafe { + libc::signal(libc::SIGINT, handle_signal as libc::sighandler_t); + libc::signal(libc::SIGTERM, handle_signal as libc::sighandler_t); + } + } + struct ChildGuard(Option); impl Drop for ChildGuard { fn drop(&mut self) { @@ -2088,6 +2109,7 @@ fn run_cli() -> Result { let _ = child.kill(); let _ = child.wait(); } + PROXY_CHILD_PID.store(0, Ordering::SeqCst); } } @@ -2100,6 +2122,11 @@ fn run_cli() -> Result { .context(format!("Failed to execute command: {}", cmd_name))?, )); + // Store child PID for signal handler before anything can fail + if let Some(ref inner) = child.0 { + PROXY_CHILD_PID.store(inner.id(), Ordering::SeqCst); + } + let inner = child.0.as_mut().context("Child process missing")?; let stdout_pipe = inner .stdout From 2bb5265595c4a80fe1ad7e9ab3ffc8dd013b019c Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 12:09:29 +0200 Subject: [PATCH 103/204] fix(stream): P0 fixes from PR #956 review - pipe_cmd: fix panic on multi-byte UTF-8 at 1024 byte boundary (floor_char_boundary in auto_detect_filter) - pipe_cmd: cap stdin at 10 MiB to prevent OOM (reuses RAW_CAP) - stream: hoist RAW_CAP to pub const at module level - hook_cmd: check deny before get_rewritten in handle_vscode (matches handle_copilot_cli and run_claude order) - hook_cmd: escape backslash and pipe in audit log sanitizer - tsc_cmd: hoist duplicate TSC_ERROR regex to single module-level lazy_static --- src/cmds/js/tsc_cmd.rs | 18 +++++++----------- src/cmds/system/pipe_cmd.rs | 21 ++++++++++++++++++++- 
src/core/stream.rs | 3 ++- src/hooks/hook_cmd.rs | 28 ++++++++++++++++++++-------- 4 files changed, 49 insertions(+), 21 deletions(-) diff --git a/src/cmds/js/tsc_cmd.rs b/src/cmds/js/tsc_cmd.rs index 6c1f23cb5..e87988289 100644 --- a/src/cmds/js/tsc_cmd.rs +++ b/src/cmds/js/tsc_cmd.rs @@ -4,9 +4,16 @@ use crate::core::runner; use crate::core::stream::{BlockHandler, BlockStreamFilter}; use crate::core::utils::{resolved_command, tool_exists, truncate}; use anyhow::Result; +use lazy_static::lazy_static; use regex::Regex; use std::collections::{HashMap, HashSet}; +lazy_static! { + static ref TSC_ERROR: Regex = Regex::new( + r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" + ).unwrap(); +} + pub fn run(args: &[String], verbose: u8) -> Result { let tsc_exists = tool_exists("tsc"); @@ -58,11 +65,6 @@ impl BlockHandler for TscHandler { } fn is_block_start(&mut self, line: &str) -> bool { - lazy_static::lazy_static! { - static ref TSC_ERROR: Regex = Regex::new( - r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" - ).unwrap(); - } if let Some(caps) = TSC_ERROR.captures(line) { self.error_count += 1; self.files.insert(caps[1].to_string()); @@ -104,12 +106,6 @@ impl BlockHandler for TscHandler { } pub(crate) fn filter_tsc_output(output: &str) -> String { - lazy_static::lazy_static! { - // Pattern: src/file.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. 
- static ref TSC_ERROR: Regex = Regex::new( - r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" - ).unwrap(); - } struct TsError { file: String, diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index 5736d173d..304f3f2af 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -1,6 +1,8 @@ use anyhow::Result; use std::io::Read; +use crate::core::stream::RAW_CAP; + pub fn resolve_filter(name: &str) -> Option String> { match name { "cargo-test" | "cargo" => Some(crate::cmds::rust::cargo_cmd::filter_cargo_test), @@ -128,7 +130,10 @@ fn find_wrapper(input: &str) -> String { } pub fn auto_detect_filter(input: &str) -> fn(&str) -> String { - let first_1k = &input[..input.len().min(1024)]; + let end = input.len().min(1024); + // Avoid panic: byte 1024 may fall inside a multi-byte UTF-8 char + let end = input.floor_char_boundary(end); + let first_1k = &input[..end]; if first_1k.contains("test result:") && first_1k.contains("passed;") { return crate::cmds::rust::cargo_cmd::filter_cargo_test; @@ -189,8 +194,12 @@ fn identity_filter(input: &str) -> String { pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { let mut buf = String::new(); std::io::stdin() + .take((RAW_CAP + 1) as u64) .read_to_string(&mut buf) .map_err(|e| anyhow::anyhow!("Failed to read stdin: {}", e))?; + if buf.len() > RAW_CAP { + anyhow::bail!("stdin exceeds {} byte limit", RAW_CAP); + } if passthrough { print!("{}", buf); @@ -405,6 +414,16 @@ mod tests { assert_eq!(out, ""); } + #[test] + fn test_auto_detect_multibyte_at_1024_boundary() { + // Build input where byte 1024 falls inside a multi-byte char (é = 2 bytes) + let mut input = "a".repeat(1023); + input.push('é'); // 2-byte char starting at byte 1023, ends at 1025 + let f = auto_detect_filter(&input); + let out = f(&input); + assert_eq!(out, input); + } + #[test] fn test_auto_detect_single_line_unknown() { let input = "hello world\n"; diff --git a/src/core/stream.rs 
b/src/core/stream.rs index 7e0dcba15..212316d7b 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -216,6 +216,8 @@ pub fn status_to_exit_code(status: std::process::ExitStatus) -> i32 { } // ISSUE #897: ChildGuard RAII prevents zombie processes that caused kernel panic +pub const RAW_CAP: usize = 10_485_760; // 10 MiB + pub fn run_streaming( cmd: &mut Command, stdin_mode: StdinMode, @@ -313,7 +315,6 @@ pub fn run_streaming( }); let stdout = child.0.stdout.take().context("No child stdout handle")?; - const RAW_CAP: usize = 10_485_760; let mut raw_stdout = String::new(); let mut filtered = String::new(); let mut capped = false; diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index 844875853..ed8bd29c8 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -121,18 +121,16 @@ fn get_rewritten(cmd: &str) -> Option { } fn handle_vscode(cmd: &str) -> Result<()> { - let rewritten = match get_rewritten(cmd) { - Some(r) => r, - None => return Ok(()), - }; - let verdict = permissions::check_command(cmd); - - // Deny: pass through without rewrite — let the host tool handle it. if verdict == PermissionVerdict::Deny { return Ok(()); } + let rewritten = match get_rewritten(cmd) { + Some(r) => r, + None => return Ok(()), + }; + // Allow (explicit rule matched): auto-allow the rewritten command. // Ask/Default (no allow rule matched): rewrite but let the host tool prompt. let decision = match verdict { @@ -247,7 +245,10 @@ fn audit_log(action: &str, original: &str, rewritten: &str) { /// Escape newlines to prevent log-line injection in the pipe-delimited audit log. 
fn sanitize_log_field(s: &str) -> String { - s.replace('\n', "\\n").replace('\r', "\\r") + s.replace('\\', "\\\\") + .replace('|', "\\|") + .replace('\n', "\\n") + .replace('\r', "\\r") } fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> { @@ -822,6 +823,17 @@ mod tests { assert!(sanitized.contains("\\n")); } + #[test] + fn test_audit_log_sanitizes_pipe_delimiter() { + let sanitized = sanitize_log_field("git log | head"); + assert!( + !sanitized.contains(" | "), + "unescaped ' | ' breaks field parsing: {}", + sanitized + ); + assert!(sanitized.contains("\\|")); + } + #[test] fn test_claude_unicode_null_passthrough() { let input = claude_input("git status \u{0000}\u{FEFF}"); From f4074f898a9b73b72bbcd8b18afab4831dcda406 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Fri, 10 Apr 2026 09:53:50 +0000 Subject: [PATCH 104/204] fix(clippy): show full error blocks instead of truncated headline Previously `filter_cargo_clippy` only captured the first line of each error diagnostic (truncated to 160 chars), discarding the file location, code snippet, `^` markers, and `= note:` context that developers need to actually fix the issue. This made rtk unusable for `cargo clippy -- -D warnings`. Now errors are collected as full multi-line blocks (up to 15 lines each, up to 10 errors total), matching the approach used by `filter_cargo_build`. Warning grouping by lint rule is unchanged. 
Fixes #602 Co-Authored-By: Claude Sonnet 4.6 --- src/cmds/rust/cargo_cmd.rs | 131 ++++++++++++++++++++++++++++--------- 1 file changed, 101 insertions(+), 30 deletions(-) diff --git a/src/cmds/rust/cargo_cmd.rs b/src/cmds/rust/cargo_cmd.rs index f0a37e71c..f4a187af0 100644 --- a/src/cmds/rust/cargo_cmd.rs +++ b/src/cmds/rust/cargo_cmd.rs @@ -862,50 +862,62 @@ fn filter_cargo_test(output: &str) -> String { result.trim().to_string() } -/// Filter cargo clippy output - group warnings by lint rule +/// Filter cargo clippy output - show full error blocks, group warnings by lint rule fn filter_cargo_clippy(output: &str) -> String { let mut by_rule: HashMap> = HashMap::new(); let mut error_count = 0; let mut warning_count = 0; - let mut error_details: Vec = Vec::new(); + // Each entry is a full multi-line error block (headline + location + code context) + let mut error_blocks: Vec> = Vec::new(); - // Parse clippy output lines - // Format: "warning: description\n --> file:line:col\n |\n | code\n" let mut current_rule = String::new(); + let mut in_error = false; + let mut current_block: Vec = Vec::new(); for line in output.lines() { - // Skip compilation lines + // Skip compilation progress lines if line.trim_start().starts_with("Compiling") || line.trim_start().starts_with("Checking") || line.trim_start().starts_with("Downloading") || line.trim_start().starts_with("Downloaded") || line.trim_start().starts_with("Finished") { + if in_error && !current_block.is_empty() { + error_blocks.push(current_block.clone()); + current_block.clear(); + in_error = false; + } continue; } - // "warning: unused variable [unused_variables]" or "warning: description [clippy::rule_name]" - if (line.starts_with("warning:") || line.starts_with("warning[")) - || (line.starts_with("error:") || line.starts_with("error[")) + // Skip noise: summary counts and abort lines + if (line.contains("generated") && line.contains("warning")) + || line.contains("aborting due to") + || line.contains("could not 
compile") { - // Skip summary lines: "warning: `rtk` (bin) generated 5 warnings" - if line.contains("generated") && line.contains("warning") { - continue; - } - // Skip "error: aborting" / "error: could not compile" - if line.contains("aborting due to") || line.contains("could not compile") { - continue; + continue; + } + + let is_error_line = line.starts_with("error:") || line.starts_with("error["); + let is_warning_line = line.starts_with("warning:") || line.starts_with("warning["); + + if is_error_line || is_warning_line { + // Flush any in-progress error block before starting a new diagnostic + if in_error && !current_block.is_empty() { + error_blocks.push(current_block.clone()); + current_block.clear(); } + in_error = false; - let is_error = line.starts_with("error"); - if is_error { + if is_error_line { error_count += 1; - error_details.push(truncate(line.trim(), 160)); + in_error = true; + current_block.push(line.to_string()); } else { warning_count += 1; } - // Extract rule name from brackets + // Extract rule/error-code from brackets for warning grouping current_rule = if let Some(bracket_start) = line.rfind('[') { if let Some(bracket_end) = line.rfind(']') { line[bracket_start + 1..bracket_end].to_string() @@ -913,8 +925,7 @@ fn filter_cargo_clippy(output: &str) -> String { line.to_string() } } else { - // No bracket: use the message itself as the rule - let prefix = if is_error { "error: " } else { "warning: " }; + let prefix = if is_error_line { "error: " } else { "warning: " }; line.strip_prefix(prefix).unwrap_or(line).to_string() }; } else if line.trim_start().starts_with("--> ") { @@ -925,9 +936,29 @@ fn filter_cargo_clippy(output: &str) -> String { .or_default() .push(location); } + if in_error { + current_block.push(line.to_string()); + } + } else if in_error { + if line.trim().is_empty() { + // Blank line terminates the error block + if !current_block.is_empty() { + error_blocks.push(current_block.clone()); + current_block.clear(); + } + in_error 
= false; + } else if current_block.len() < 15 { + // Collect code-context lines (|, ^, = note:, help:, etc.) + current_block.push(line.to_string()); + } } } + // Flush final error block + if in_error && !current_block.is_empty() { + error_blocks.push(current_block); + } + if error_count == 0 && warning_count == 0 { return "cargo clippy: No issues found".to_string(); } @@ -939,18 +970,21 @@ fn filter_cargo_clippy(output: &str) -> String { )); result.push_str("═══════════════════════════════════════\n"); - if !error_details.is_empty() { - result.push_str("\nError details:\n"); - for (idx, detail) in error_details.iter().take(5).enumerate() { - result.push_str(&format!(" {}. {}\n", idx + 1, detail)); + // Show full error blocks so developers can see what needs fixing + if !error_blocks.is_empty() { + result.push_str("\nErrors:\n"); + for block in error_blocks.iter().take(10) { + for block_line in block { + result.push_str(&format!(" {}\n", truncate(block_line, 160))); + } + result.push('\n'); } - if error_details.len() > 5 { - result.push_str(&format!(" ... +{} more errors\n", error_details.len() - 5)); + if error_blocks.len() > 10 { + result.push_str(&format!(" ... 
+{} more errors\n", error_blocks.len() - 10)); } - result.push('\n'); } - // Sort rules by frequency + // Sort warning rules by frequency let mut rule_counts: Vec<_> = by_rule.iter().collect(); rule_counts.sort_by(|a, b| b.1.len().cmp(&a.1.len())); @@ -1371,10 +1405,47 @@ warning: unused variable: `x` [unused_variables] "#; let result = filter_cargo_clippy(output); assert!(result.contains("cargo clippy: 1 errors, 1 warnings")); - assert!(result.contains("Error details:")); + assert!(result.contains("Errors:")); assert!(result.contains("struct literals are not allowed here")); } + #[test] + fn test_filter_cargo_clippy_shows_full_error_block() { + // Full multi-line error block must be shown so the developer can debug + let output = r#" Checking rtk v0.5.0 +error[E0308]: mismatched types + --> src/main.rs:10:5 + | +9 | fn foo() -> i32 { + | --- expected `i32` because of return type +10| "hello" + | ^^^^^^^ expected `i32`, found `&str` + +error: aborting due to 1 previous error +"#; + let result = filter_cargo_clippy(output); + assert!(result.contains("cargo clippy: 1 errors, 0 warnings"), "got: {}", result); + assert!(result.contains("error[E0308]: mismatched types"), "got: {}", result); + assert!(result.contains("src/main.rs:10:5"), "got: {}", result); + assert!(result.contains("expected `i32`, found `&str`"), "got: {}", result); + } + + #[test] + fn test_filter_cargo_clippy_multiple_errors_show_all_blocks() { + let output = r#"error[E0308]: mismatched types + --> src/foo.rs:5:3 + +error[E0425]: cannot find value `x` + --> src/bar.rs:12:9 + +error: aborting due to 2 previous errors +"#; + let result = filter_cargo_clippy(output); + assert!(result.contains("2 errors"), "got: {}", result); + assert!(result.contains("src/foo.rs:5:3"), "got: {}", result); + assert!(result.contains("src/bar.rs:12:9"), "got: {}", result); + } + #[test] fn test_filter_cargo_install_success() { let output = r#" Installing rtk v0.11.0 From 4a228208e3094a0819d10e0c62ba37ee1538698d Mon Sep 17 
00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:10:01 +0200 Subject: [PATCH 105/204] feat(cicd): enforce cicd sast & package check - semgrep for sast check by yml rules - dependabot for package detection - update CICD doc - clippy -D unsafe_code hard fail --- .github/dependabot.yml | 17 ++++++ .github/workflows/CICD.md | 40 +++++++------- .github/workflows/ci.yml | 16 +++++- .semgrep.yml | 108 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 161 insertions(+), 20 deletions(-) create mode 100644 .github/dependabot.yml create mode 100644 .semgrep.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..57dba0f42 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,17 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "weekly" + labels: + - "dependencies" + open-pull-requests-limit: 5 + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + labels: + - "dependencies" + - "ci" diff --git a/.github/workflows/CICD.md b/.github/workflows/CICD.md index 53776a00d..b20e9cff6 100644 --- a/.github/workflows/CICD.md +++ b/.github/workflows/CICD.md @@ -14,27 +14,29 @@ Trigger: pull_request to develop or master └────────┬─────────┘ │ ┌────────▼─────────┐ - │ clippy │ - └──┬───┬───┬───┬───┘ - │ │ │ │ - ┌──────────────┘ │ │ └──────────────┐ - │ ┌───────┘ └───────┐ │ - ▼ ▼ ▼ ▼ - ┌──────────────┐ ┌──────────────┐ ┌───────────┐ ┌──────────┐ - │ test │ │Security Scan │ │ benchmark │ │ validate │ - │ ubuntu │ │ cargo audit │ │ >=80% │ │ ai agent │ - │ windows │ │ (advisory) │ │ savings │ │ doc │ - │ macos │ │ │ │ │ │ │ - └──────┬───────┘ └──────┬───────┘ └─────┬─────┘ └────┬─────┘ - │ │ │ │ - └────────────────┴───────┬───────┴─────────────┘ - │ - ┌──────────▼─────────┐ - │ All must pass │ - │ to merge │ - └────────────────────┘ + │ clippy │ + │ -D unsafe_code │ + └┬───┬───┬───┬───┬─┘ + │ │ │ │ │ + 
┌───────────────┘ │ │ │ └───────────────┐ + │ ┌───────────┘ │ └──────────┐ │ + ▼ ▼ ▼ ▼ ▼ + ┌──────────┐ ┌──────────┐ ┌───────────┐ ┌─────────┐ ┌──────────┐ + │ test │ │ security │ │ semgrep │ │benchmark│ │ doc │ + │ ubuntu │ │ cargo │ │ AST-aware │ │ >=80% │ │ review │ + │ windows │ │ audit │ │ diff-only │ │ savings │ │ ai agent │ + │ macos │ │ patterns │ │ │ │ │ │ │ + └────┬─────┘ └────┬─────┘ └─────┬─────┘ └────┬────┘ └────┬─────┘ + │ │ │ │ │ + └────────────┴─────────┬───┴─────────────┴────────────┘ + │ + ┌──────────▼─────────┐ + │ All must pass │ + │ to merge │ + └────────────────────┘ + DCO check (independent, develop PRs only) + + Dependabot (weekly: Cargo deps + GitHub Actions) ``` ## Merge to develop — pre-release (cd.yml) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a1b2c261e..9ebb4c4ae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,7 +46,7 @@ jobs: with: components: clippy - uses: Swatinem/rust-cache@v2 - - run: cargo clippy --all-targets + - run: cargo clippy --all-targets -- -D unsafe_code # ─── Parallel gates (all need code to compile) ─── @@ -185,6 +185,20 @@ jobs: echo "- Require approval from 2 maintainers" >> $GITHUB_STEP_SUMMARY echo "- Test in isolated environment before merge" >> $GITHUB_STEP_SUMMARY + semgrep: + name: semgrep security scan + needs: clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: returntocorp/semgrep-action@v1 + with: + config: .semgrep.yml + env: + SEMGREP_BASELINE_REF: ${{ github.event.pull_request.base.sha }} + benchmark: name: benchmark needs: clippy diff --git a/.semgrep.yml b/.semgrep.yml new file mode 100644 index 000000000..4e8cc1f22 --- /dev/null +++ b/.semgrep.yml @@ -0,0 +1,108 @@ +rules: + - id: dynamic-command-execution + patterns: + - pattern: Command::new($ARG) + - pattern-not: Command::new("...") + message: > + Dynamic shell invocation via Command::new($ARG). 
+ RTK only executes known CLI tools — use string literals, not variables. + languages: [rust] + severity: ERROR + + - id: unsafe-block + pattern: unsafe { ... } + message: > + Unsafe block detected. RTK has no legitimate need for unsafe code. + languages: [rust] + severity: ERROR + + - id: ld-preload-manipulation + pattern-either: + - pattern: $CMD.env("LD_PRELOAD", ...) + - pattern: $CMD.env("LD_LIBRARY_PATH", ...) + message: > + LD_PRELOAD/LD_LIBRARY_PATH manipulation detected. + This can hijack shared library loading — forbidden in RTK. + languages: [rust] + severity: ERROR + + - id: raw-socket-usage + pattern-either: + - pattern: TcpStream::$METHOD(...) + - pattern: UdpSocket::$METHOD(...) + - pattern: TcpListener::$METHOD(...) + message: > + Raw socket usage detected. RTK is a CLI proxy — it should not + open network connections directly. Use ureq in telemetry only. + languages: [rust] + severity: ERROR + + - id: reqwest-forbidden + pattern: reqwest::$METHOD(...) + message: > + reqwest is forbidden in RTK. The project uses ureq for HTTP + (telemetry only). Adding reqwest increases binary size and attack surface. + languages: [rust] + severity: ERROR + + - id: interpreter-execution + pattern-either: + - pattern: Command::new("curl") + - pattern: Command::new("wget") + - pattern: Command::new("python") + - pattern: Command::new("python3") + - pattern: Command::new("node") + - pattern: Command::new("bash") + - pattern: Command::new("sh") + - pattern: Command::new("perl") + - pattern: Command::new("ruby") + message: > + Direct interpreter/downloader execution detected. + RTK proxies user commands — it should never spawn interpreters + or download tools on its own. + languages: [rust] + severity: ERROR + + - id: ureq-outside-telemetry + pattern: ureq::$METHOD(...) + paths: + exclude: + - /src/core/telemetry.rs + message: > + ureq usage outside of src/core/telemetry.rs. + HTTP calls are restricted to the telemetry module to prevent data exfiltration. 
+ languages: [rust] + severity: ERROR + + # ── WARNING rules (non-blocking, flag for review) ── + + - id: path-env-manipulation + pattern-either: + - pattern: $CMD.env("PATH", ...) + - pattern: std::env::set_var("PATH", ...) + - pattern: env::set_var("PATH", ...) + message: > + PATH environment variable manipulation detected. + Hijacking PATH can redirect command resolution to attacker-controlled binaries. + languages: [rust] + severity: WARNING + + - id: sensitive-path-reference + pattern-regex: \.(ssh|bashrc|zshrc|bash_profile|profile)|authorized_keys|/etc/passwd|/etc/shadow + message: > + Reference to sensitive system path detected. + RTK filters should not access dotfiles, SSH keys, or system credential files. + languages: [rust] + severity: WARNING + + - id: filesystem-deletion + pattern-either: + - pattern: fs::remove_file(...) + - pattern: fs::remove_dir_all(...) + - pattern: std::fs::remove_file(...) + - pattern: std::fs::remove_dir_all(...) + message: > + File/directory deletion detected. Expected in hooks/init cleanup, + surprising in a filter module. Verify intent. 
+ languages: [rust] + severity: WARNING From bcbf8ed2b5cdec6af48585ff541a25a049f51c65 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:18:41 +0200 Subject: [PATCH 106/204] fix(ci): allow unsafe_code for libc signal handler Only one allow use in the codebase --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 78a85b248..8e8e03872 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2010,6 +2010,7 @@ fn run_cli() -> Result { static PROXY_CHILD_PID: AtomicU32 = AtomicU32::new(0); #[cfg(unix)] + #[allow(unsafe_code)] { unsafe extern "C" fn handle_signal(sig: libc::c_int) { let pid = PROXY_CHILD_PID.load(Ordering::SeqCst); @@ -2017,7 +2018,6 @@ fn run_cli() -> Result { libc::kill(pid as libc::pid_t, libc::SIGTERM); libc::waitpid(pid as libc::pid_t, std::ptr::null_mut(), 0); } - // Re-raise with default handler so parent sees correct exit status libc::signal(sig, libc::SIG_DFL); libc::raise(sig); } From 8857e1725e483d7e047f1875f68570b8c7efc5a8 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:25:01 +0200 Subject: [PATCH 107/204] fix(cicd): semgrep use docker (git action archived) --- .github/workflows/ci.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9ebb4c4ae..8c342d4e3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -189,15 +189,13 @@ jobs: name: semgrep security scan needs: clippy runs-on: ubuntu-latest + container: + image: semgrep/semgrep steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: returntocorp/semgrep-action@v1 - with: - config: .semgrep.yml - env: - SEMGREP_BASELINE_REF: ${{ github.event.pull_request.base.sha }} + - run: semgrep scan --config .semgrep.yml --baseline-commit ${{ github.event.pull_request.base.sha }} --error benchmark: name: benchmark From 
dc5877579ef97f10a57136fa0ccf9b0049d67aca Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:58:52 +0200 Subject: [PATCH 108/204] docs(docs): stop manual CHANGELOG edits and use release please --- .claude/agents/rust-rtk.md | 2 +- .claude/skills/ship.md | 37 +++++++----------------- CONTRIBUTING.md | 58 ++++++++++++++++++++++++++++++-------- src/cmds/README.md | 2 +- 4 files changed, 59 insertions(+), 40 deletions(-) diff --git a/.claude/agents/rust-rtk.md b/.claude/agents/rust-rtk.md index 8efe67f0e..d32e344b7 100644 --- a/.claude/agents/rust-rtk.md +++ b/.claude/agents/rust-rtk.md @@ -509,7 +509,7 @@ rtk newcmd args - Update `CLAUDE.md` Module Responsibilities table - Update `README.md` with command support -- Update `CHANGELOG.md` +- CHANGELOG.md is auto-generated by release-please — do not edit manually ## Performance Targets diff --git a/.claude/skills/ship.md b/.claude/skills/ship.md index 380a8ba2b..b774bcb42 100644 --- a/.claude/skills/ship.md +++ b/.claude/skills/ship.md @@ -61,8 +61,9 @@ git status # Should show "nothing to commit, working tree clean" **Files to update**: 1. `Cargo.toml` (line 3): `version = "X.Y.Z"` -2. `CHANGELOG.md` (add new section) -3. `README.md` (if version mentioned) +2. `README.md` (if version mentioned) + +> **Note**: `CHANGELOG.md` is auto-generated by release-please from conventional commit messages — do not edit manually. 
**Example**: ```toml @@ -77,21 +78,11 @@ name = "rtk" version = "0.17.0" # New version ``` -**CHANGELOG.md template**: -```markdown -## [0.17.0] - 2026-02-15 - -### Added -- `rtk pytest` command for Python test filtering (90% token reduction) -- Support for `pytest` JSON output parsing -- Integration with `uv` package manager auto-detection - -### Fixed -- Shell escaping for PowerShell on Windows -- Memory leak in regex pattern caching - -### Changed -- Updated `cargo test` filter to show test names in failures +**Commit message quality matters** — release-please generates CHANGELOG entries directly from your `feat:` and `fix:` commits: +``` +feat(pytest): add Python test filtering with JSON output parsing +fix(shell): correct PowerShell escaping on Windows +perf(cargo): lazy-compile clippy regex patterns ``` ### Step 3: Build and Verify @@ -119,13 +110,12 @@ hyperfine 'target/release/rtk git status' --warmup 3 ```bash # Stage version files -git add Cargo.toml Cargo.lock CHANGELOG.md README.md +git add Cargo.toml Cargo.lock README.md # Commit with version tag git commit -m "chore(release): bump version to v0.17.0 - Updated Cargo.toml version -- Updated CHANGELOG.md with release notes - Verified all quality checks pass - Benchmarked performance (<10ms startup) @@ -361,14 +351,7 @@ target/release/rtk --version **Symptom**: CHANGELOG.md has conflicts after rebase -**Solution**: -```bash -# Always add new entries at top -# Manual merge: -# 1. Keep all entries from both branches -# 2. Sort by version (newest first) -# 3. Ensure date format consistency -``` +**Solution**: Do not edit CHANGELOG.md manually. It is auto-generated by release-please from conventional commit messages when merging to master. 
## Security Considerations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4ae5bfca5..6cd87369b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -107,15 +107,50 @@ For the step-by-step checklist (create filter, register rewrite pattern, registe --- +## Commit Messages & Changelog + +RTK uses [Conventional Commits](https://www.conventionalcommits.org/) and [release-please](https://github.com/googleapis/release-please) to **auto-generate CHANGELOG.md, version bumps, and GitHub releases**. Never edit `CHANGELOG.md` manually — it is fully managed by release-please from your commit messages. + +### Commit format + +``` +(): +``` + +| Type | Semver Impact | When to Use | +|------|---------------|-------------| +| `feat` | Minor | New features, new filters, new command support | +| `fix` | Patch | Bug fixes, corrections | +| `perf` | Patch | Performance improvements | +| `refactor` | — | Code restructuring (no changelog entry) | +| `docs` | — | Documentation only | +| `chore` | — | Maintenance, CI, deps | +| `feat!` / `fix!` | Major | Breaking changes (add `!` after type) | + +**Scope** should match the module or area: `git`, `cargo`, `gh`, `hook`, `tracking`, `cicd`, etc. + +### Examples + +``` +feat(kubectl): add pod log filtering +fix(git): preserve merge commit messages in log filter +perf(cargo): lazy-compile clippy regex patterns +feat!(hook): change rewrite config format +``` + +These commit messages directly become CHANGELOG entries when release-please creates a release PR. Write them as if they will be read by users. + +--- + ## Branch Naming Convention Git branch names cannot include spaces or colons, so we use slash-prefixed names. Pick the prefix that matches your change type and follow it with an optional scope and a short, kebab-case description. 
-| Prefix | Semver Impact | When to Use | -|--------|---------------|-------------| -| `fix/` | Patch | Bug fixes, corrections, minor adjustments | -| `feat/` | Minor | New features, new filters, new command support | -| `chore/` | Major | Breaking changes, API changes, removed functionality | +| Prefix | When to Use | +|--------|-------------| +| `fix/` | Bug fixes, corrections, minor adjustments | +| `feat/` | New features, new filters, new command support | +| `chore/` | CI/CD, deps, maintenance, breaking changes | Combine the prefix with a scope if it adds clarity (e.g. `git`, `kubectl`, `filter`, `tracking`, `config`) and finish with a descriptive slug: `fix/-` or `feat/`. @@ -137,7 +172,7 @@ chore/release-pipeline-cleanup **For large features or refactors**, prefer multi-part PRs over one enormous PR. Split the work into logical, reviewable chunks that can each be merged independently. Examples: - feat(Part 1): Add data model and tests - feat(Part 2): Add CLI command and integration -- feat(Part 3): Update documentation and CHANGELOG +- feat(Part 3): Update documentation **Why**: Small, focused PRs are easier to review, safer to merge, and faster to ship. Large PRs slow down review, hide bugs, and increase merge conflict risk. @@ -166,7 +201,7 @@ Every change **must** include tests. See [Testing](#testing) below. ### 4. Add Documentation -Every change **must** include documentation updates. See [Documentation](#documentation) below. +Documentation updates are required for new filters, new features, and changes that affect already-documented behavior. Bug fixes and refactors typically don't need doc updates. See [Documentation](#documentation) below. ### Contributor License Agreement (CLA) @@ -235,17 +270,18 @@ cargo fmt --all --check && cargo clippy --all-targets && cargo test ## Documentation -Every change **must** include documentation updates. 
Use this table to find which docs to update: +Documentation updates are required for new filters, new features, and changes that affect already-documented behavior. Use this table to find which docs to update: | What you changed | Update these docs | |------------------|-------------------| -| New Rust filter (`src/cmds/`) | Ecosystem `README.md` (e.g., `src/cmds/git/README.md`), [README.md](README.md) command list, [CHANGELOG.md](CHANGELOG.md) | -| New TOML filter (`src/filters/`) | [src/filters/README.md](src/filters/README.md) if naming conventions change, [README.md](README.md) command list, [CHANGELOG.md](CHANGELOG.md) | +| New Rust filter (`src/cmds/`) | Ecosystem `README.md` (e.g., `src/cmds/git/README.md`), [README.md](README.md) command list | +| New TOML filter (`src/filters/`) | [src/filters/README.md](src/filters/README.md) if naming conventions change, [README.md](README.md) command list | | New rewrite pattern | `src/discover/rules.rs` — see [Adding a New Command Filter](src/cmds/README.md#adding-a-new-command-filter) | | Core infrastructure (`src/core/`) | [src/core/README.md](src/core/README.md), [docs/contributing/TECHNICAL.md](docs/contributing/TECHNICAL.md) if flow changes | | Hook system (`src/hooks/`) | [src/hooks/README.md](src/hooks/README.md), [hooks/README.md](hooks/README.md) for agent-facing docs | | Architecture or design change | [ARCHITECTURE.md](docs/contributing/ARCHITECTURE.md), [docs/contributing/TECHNICAL.md](docs/contributing/TECHNICAL.md) | -| Bug fix or breaking change | [CHANGELOG.md](CHANGELOG.md) | + +> **Note**: Do NOT edit `CHANGELOG.md` manually — it is auto-generated by [release-please](https://github.com/googleapis/release-please) from your commit messages. See [Commit Messages & Changelog](#commit-messages--changelog). 
**Navigation**: [CONTRIBUTING.md](CONTRIBUTING.md) (you are here) → [docs/contributing/TECHNICAL.md](docs/contributing/TECHNICAL.md) (architecture + flow) → each folder's `README.md` (implementation details). diff --git a/src/cmds/README.md b/src/cmds/README.md index 5e0f633b3..e2260ba91 100644 --- a/src/cmds/README.md +++ b/src/cmds/README.md @@ -194,7 +194,7 @@ Adding a new filter or command requires changes in multiple places. For TOML-vs- - Add routing match arm in `main.rs`: `Commands::Mycmd { args } => mycmd_cmd::run(&args, cli.verbose)?,` 3. **Add rewrite pattern** — Entry in `src/discover/rules.rs` (PATTERNS + RULES arrays at matching index) so hooks auto-rewrite the command 4. **Write tests** — Real fixture, snapshot test, token savings >= 60% (see [testing rules](../../.claude/rules/cli-testing.md)) -5. **Update docs** — Ecosystem README, CHANGELOG.md +5. **Update docs** — Ecosystem README (CHANGELOG.md is auto-generated by release-please) ### TOML filter (simple line-based filtering) From 71eeedab4d771986b3d3dc5c439f5646135ff96c Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 14:00:23 +0200 Subject: [PATCH 109/204] feat(stream): P1 fixes from PR #956 review + trigger feat release tag --- src/cmds/system/pipe_cmd.rs | 66 ++++++++++++++++++++++++++++++++++++- src/core/stream.rs | 41 ++++++++++++++++++++--- src/hooks/hook_cmd.rs | 15 +++++++++ src/main.rs | 2 +- 4 files changed, 117 insertions(+), 7 deletions(-) diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index 304f3f2af..c0c73b272 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -191,6 +191,14 @@ fn identity_filter(input: &str) -> String { input.to_string() } +fn apply_filter(filter_fn: fn(&str) -> String, input: &str) -> String { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| filter_fn(input))) + .unwrap_or_else(|_| { + eprintln!("[rtk] warning: filter panicked — passing through 
raw output"); + input.to_string() + }) +} + pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { let mut buf = String::new(); std::io::stdin() @@ -218,7 +226,7 @@ pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { None => auto_detect_filter(&buf), }; - let output = filter_fn(&buf); + let output = apply_filter(filter_fn, &buf); print!("{}", output); Ok(()) } @@ -457,6 +465,62 @@ mod tests { assert!(resolve_filter("prettier").is_some()); } + #[test] + fn test_panicking_filter_returns_passthrough() { + fn panicking_filter(_input: &str) -> String { + panic!("filter bug"); + } + let input = "some output\n"; + let result = super::apply_filter(panicking_filter, input); + assert_eq!(result, input); + } + + fn count_tokens(s: &str) -> usize { + s.split_whitespace().count() + } + + #[test] + fn test_grep_wrapper_token_savings() { + // Realistic rg output: 200 matches across 10 files (20 per file → 10 shown + truncation) + let mut input = String::new(); + for file_idx in 1..=10 { + for line in 1..=20 { + input.push_str(&format!( + "src/cmds/module{}/handler.rs:{}: let result = process_request(ctx, &payload).await?;\n", + file_idx, line * 10 + )); + } + } + let output = grep_wrapper(&input); + let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(&input) as f64 * 100.0); + assert!( + savings >= 40.0, + "grep filter: expected ≥40% savings, got {:.1}% (in={}, out={})", + savings, count_tokens(&input), count_tokens(&output) + ); + } + + #[test] + fn test_find_wrapper_token_savings() { + // Realistic find output: 500 files across 30 dirs (20-dir cap + 10-file cap both trigger) + let mut input = String::new(); + for dir in 1..=30 { + for file in 1..=17 { + input.push_str(&format!( + "./src/components/feature{}/sub_{}/component_{}.tsx\n", + dir, dir, file + )); + } + } + let output = find_wrapper(&input); + let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(&input) as f64 * 100.0); + assert!( + savings >= 40.0, + 
"find filter: expected ≥40% savings, got {:.1}% (in={}, out={})", + savings, count_tokens(&input), count_tokens(&output) + ); + } + #[test] fn test_auto_detect_mypy_output() { let input = "src/app.py:42: error: Argument 1 has incompatible type [arg-type]\n\ diff --git a/src/core/stream.rs b/src/core/stream.rs index 212316d7b..9f662477f 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -297,18 +297,28 @@ pub fn run_streaming( let stderr = child.0.stderr.take().context("No child stderr handle")?; let stderr_thread = std::thread::spawn(move || -> String { let mut raw_err = String::new(); + let mut capped = false; if live_stderr { let stderr_out = io::stderr(); let mut err_out = stderr_out.lock(); for line in BufReader::new(stderr).lines().map_while(Result::ok) { writeln!(err_out, "{}", line).ok(); - raw_err.push_str(&line); - raw_err.push('\n'); + if raw_err.len() + line.len() < RAW_CAP { + raw_err.push_str(&line); + raw_err.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: stderr exceeds 10 MiB — capture truncated"); + } } } else { for line in BufReader::new(stderr).lines().map_while(Result::ok) { - raw_err.push_str(&line); - raw_err.push('\n'); + if raw_err.len() + line.len() < RAW_CAP { + raw_err.push_str(&line); + raw_err.push('\n'); + } else if !capped { + capped = true; + } } } raw_err @@ -400,7 +410,11 @@ pub fn run_streaming( if let Some(mut f) = saved_filter { if let Some(post) = f.on_exit(exit_code, &raw) { filtered.push_str(&post); - print!("{}", post); + match write!(io::stdout(), "{}", post) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} + Err(e) => return Err(e.into()), + Ok(_) => {} + } } } @@ -616,6 +630,23 @@ pub(crate) mod tests { ); } + #[test] + fn test_run_streaming_stderr_cap_at_10mb() { + let mut cmd = Command::new("sh"); + // ~11 MiB on stderr, nothing on stdout + cmd.args([ + "-c", + "dd if=/dev/zero bs=1024 count=11264 2>/dev/null | tr '\\0' 'a' | fold -w 80 1>&2", + ]); + let result = 
run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); + // raw = raw_stdout + raw_stderr; stdout is empty so raw ≈ stderr size + assert!( + result.raw.len() <= RAW_CAP + 200, + "stderr in raw should be capped at ~10 MiB, got {} bytes", + result.raw.len() + ); + } + #[test] fn test_child_guard_prevents_zombie() { let mut cmd = Command::new("true"); diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index ed8bd29c8..1481fc218 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -856,4 +856,19 @@ mod tests { PermissionVerdict::Deny ); } + + #[test] + fn test_gemini_deny_blocks_rewrite() { + use super::permissions::check_command_with_rules; + let deny = vec!["cargo test".to_string()]; + assert_eq!( + check_command_with_rules("cargo test", &deny, &[], &[]), + PermissionVerdict::Deny + ); + // Denied commands must not be rewritten — Gemini handler checks deny before rewrite + assert!( + get_rewritten("cargo test").is_some(), + "cargo test should be rewritable when not denied" + ); + } } diff --git a/src/main.rs b/src/main.rs index 5c6313289..c5a176f79 100644 --- a/src/main.rs +++ b/src/main.rs @@ -560,7 +560,7 @@ enum Commands { min_occurrences: usize, }, - /// Execute a shell command via the RTK native executor (filters + tracking) + /// Execute a shell command via sh -c (raw, no filtering or tracking) Run { /// Command string to execute (use -c for shell-like invocation) #[arg(short = 'c', long = "command")] From cce04811c7501e3ed97ff6b0cfb6517c4d29f9ac Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 14:22:19 +0200 Subject: [PATCH 110/204] Update troubleshooting.md --- docs/guide/troubleshooting.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guide/troubleshooting.md b/docs/guide/troubleshooting.md index 8f9a0db35..7eb3b41ff 100644 --- a/docs/guide/troubleshooting.md +++ b/docs/guide/troubleshooting.md @@ -116,7 +116,7 @@ curl -fsSL 
https://raw.githubusercontent.com/rtk-ai/rtk/refs/heads/master/instal rtk init -g # full hook mode works in WSL ``` -On native Windows, RTK falls back to CLAUDE.md injection. Your AI assistant gets RTK instructions but won't auto-rewrite commands. You can still use RTK manually: `rtk cargo test`, `rtk git status`, etc. +On native Windows, RTK falls back to CLAUDE.md injection. Your AI assistant gets RTK instructions but won't auto-rewrite commands. It can still use RTK manually: `rtk cargo test`, `rtk git status`, etc. ### Node.js tools not found From 6a5bc847e06cf6066e6f4aeed5a3ad0803a3649b Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 16:04:08 +0200 Subject: [PATCH 111/204] fix(telemetry): RGPD-compliant, consent gate, erasure, privacy controls + docs update --- docs/TELEMETRY.md | 60 +++++++++++--- docs/usage/FEATURES.md | 27 ++++--- docs/usage/TRACKING.md | 2 +- src/core/config.rs | 52 ++++++++++-- src/core/mod.rs | 1 + src/core/telemetry.rs | 23 +++--- src/core/telemetry_cmd.rs | 165 ++++++++++++++++++++++++++++++++++++++ src/hooks/init.rs | 78 +++++++++++++++++- src/main.rs | 11 +++ 9 files changed, 372 insertions(+), 47 deletions(-) create mode 100644 src/core/telemetry_cmd.rs diff --git a/docs/TELEMETRY.md b/docs/TELEMETRY.md index 426cd0c74..12ffa071e 100644 --- a/docs/TELEMETRY.md +++ b/docs/TELEMETRY.md @@ -1,6 +1,11 @@ # Telemetry -RTK collects anonymous, aggregate usage metrics once per day to help improve the product. Telemetry is **enabled by default** and can be disabled at any time. +RTK collects anonymous, aggregate usage metrics once per day to help improve the product. Telemetry is **disabled by default** and requires explicit consent during `rtk init` or `rtk telemetry enable`. + +## Data Collector + +**Entity**: `RTK AI Labs` +**Contact**: contact@rtk-ai.app ## Why we collect telemetry @@ -29,7 +34,7 @@ This data directly drives our roadmap. 
For example, if telemetry shows that 40% | Field | Example | Purpose | |-------|---------|---------| -| `device_hash` | `a3f8c9...` (64 hex chars) | Count unique installations. Salted SHA-256 of hostname + username with a per-device random salt stored locally (`~/.local/share/rtk/.device_salt`). Not reversible. | +| `device_hash` | `a3f8c9...` (64 hex chars) | Count unique installations. SHA-256 of a per-device random salt stored locally (`~/.local/share/rtk/.device_salt`). Not reversible. No hostname or username included. | ### Environment @@ -111,30 +116,59 @@ This data directly drives our roadmap. For example, if telemetry shows that 40% - Personally identifiable information - IP addresses (not logged server-side) -## Opt-out +## Consent -Telemetry can be disabled instantly with either method: +Telemetry requires explicit opt-in consent (GDPR Art. 6, 7). Consent is requested during `rtk init` or via `rtk telemetry enable`. Without consent, no data is sent. ```bash -# Environment variable (per-session or in shell profile) -export RTK_TELEMETRY_DISABLED=1 +rtk telemetry status # Check current consent state +rtk telemetry enable # Give consent (interactive prompt) +rtk telemetry disable # Withdraw consent +rtk telemetry forget # Withdraw consent + delete local data + request server erasure +``` -# Or permanently in config file -# ~/.config/rtk/config.toml -[telemetry] -enabled = false +Environment variable override (blocks telemetry regardless of consent): +```bash +export RTK_TELEMETRY_DISABLED=1 ``` -When disabled, `rtk init` shows `[info] Anonymous telemetry is disabled`. No data is sent, no background thread is spawned, no network requests are made. +## Retention Policy + +- **Server-side**: telemetry records are retained for a maximum of **12 months**, then automatically purged. +- **Client-side**: the local SQLite database (`~/.local/share/rtk/tracking.db`) retains data for **90 days** by default (configurable via `tracking.history_days` in `config.toml`). 
-## Data handling +## Your Rights (GDPR) + +Under the EU General Data Protection Regulation, you have the right to: + +- **Access** your data: `rtk telemetry status` shows your device hash; the telemetry payload is fully documented above. +- **Rectification**: since data is anonymous and aggregate, rectification is not applicable. +- **Erasure** (Art. 17): run `rtk telemetry forget` to delete local data and send an erasure request to the server. Alternatively, email contact@rtk-ai.app with your device hash. +- **Restriction of processing**: `rtk telemetry disable` stops all data collection immediately. +- **Portability**: the local SQLite database at `~/.local/share/rtk/tracking.db` contains all locally stored data. +- **Objection**: `rtk telemetry disable` or `export RTK_TELEMETRY_DISABLED=1`. + +## Erasure Procedure + +1. Run `rtk telemetry forget` — this disables telemetry, deletes your device salt and ping marker, and sends an erasure request to the server. +2. If the server is unreachable, the CLI prints fallback instructions with your device hash and the contact email. +3. You can also email contact@rtk-ai.app directly to request manual erasure. + +## Data Handling - Telemetry endpoint URL and auth token are injected at **compile time** via `option_env!()` — they are not in the source code -- The server is hosted on GCP Cloud Run with TLS +- All communications use HTTPS (TLS) - Data is used exclusively for RTK product improvement - No data is sold or shared with third parties - Aggregate statistics may be published (e.g. "70% of RTK users are on macOS") +### Server-side Requirements + +The telemetry server must implement: +- `POST /erasure` endpoint accepting `{"device_hash": "...", "action": "erasure"}` +- Automatic purge of records older than 12 months +- Audit log for erasure requests (GDPR Art. 17(2) accountability) + ## For contributors The telemetry implementation lives in `src/core/telemetry.rs`. 
Key design decisions: diff --git a/docs/usage/FEATURES.md b/docs/usage/FEATURES.md index 061a604a9..75eb0bdef 100644 --- a/docs/usage/FEATURES.md +++ b/docs/usage/FEATURES.md @@ -1322,7 +1322,9 @@ max_files = 20 # Rotation : garder les N derniers fichiers # directory = "/custom/tee/path" # Chemin personnalise (optionnel) [telemetry] -enabled = true # Telemetrie anonyme (1 ping/jour, opt-out possible) +enabled = false # Telemetrie anonyme (1 ping/jour, requiert consentement) +# consent_given = true # Defini automatiquement par `rtk init` ou `rtk telemetry enable` +# consent_date = "..." # Date du consentement (RFC 3339) [hooks] exclude_commands = [] # Commandes a exclure de la recriture automatique @@ -1371,21 +1373,26 @@ FAILED: 2/15 tests ## Telemetrie -RTK envoie un ping anonyme une fois par jour (23h d'intervalle) pour des statistiques d'utilisation. +RTK peut envoyer un ping anonyme une fois par jour (23h d'intervalle) pour des statistiques d'utilisation. La telemetrie est **desactivee par defaut** et requiert un consentement explicite (RGPD Art. 6, 7). -**Donnees envoyees :** hash de device, version, OS, architecture, nombre de commandes/24h, top commandes, pourcentage d'economies. +**Donnees envoyees :** hash de device (SHA-256 d'un sel aleatoire), version, OS, architecture, nombre de commandes/24h, top commandes, pourcentage d'economies. 
-**Desactiver :** +**Responsable du traitement :** `RTK AI Labs`, contact@rtk-ai.app + +**Gerer la telemetrie :** ```bash -# Via variable d'environnement -export RTK_TELEMETRY_DISABLED=1 +rtk telemetry status # Voir l'etat du consentement +rtk telemetry enable # Donner son consentement (prompt interactif) +rtk telemetry disable # Retirer son consentement +rtk telemetry forget # Retirer + supprimer donnees locales + demande d'effacement serveur +``` -# Via config.toml -[telemetry] -enabled = false +**Desactiver via variable d'environnement :** +```bash +export RTK_TELEMETRY_DISABLED=1 ``` -Aucune donnee personnelle, aucun contenu de commande, aucun chemin de fichier n'est transmis. +Aucune donnee personnelle, aucun contenu de commande, aucun chemin de fichier n'est transmis. Conservation serveur : 12 mois max. Details : [docs/TELEMETRY.md](../TELEMETRY.md) --- diff --git a/docs/usage/TRACKING.md b/docs/usage/TRACKING.md index 82c12883d..97ff6d95a 100644 --- a/docs/usage/TRACKING.md +++ b/docs/usage/TRACKING.md @@ -539,7 +539,7 @@ let _ = conn.execute( ## Security & Privacy - **Local storage only**: Tracking database never leaves the machine -- **Telemetry enabled by default**: RTK sends a daily anonymous usage ping (version, OS, command counts, token savings). Device identity is a salted SHA-256 hash. Opt out with `RTK_TELEMETRY_DISABLED=1` or `[telemetry] enabled = false` in `~/.config/rtk/config.toml` +- **Telemetry requires consent**: RTK can send a daily anonymous usage ping (version, OS, command counts, token savings). Disabled by default, requires explicit consent via `rtk init` or `rtk telemetry enable`. Manage with `rtk telemetry status/disable/forget`. 
Override: `RTK_TELEMETRY_DISABLED=1`
 - **User control**: Users can delete `~/.local/share/rtk/tracking.db` anytime
 - **90-day retention**: Old data automatically purged
diff --git a/src/core/config.rs b/src/core/config.rs
index 248f80a33..88ae9a173 100644
--- a/src/core/config.rs
+++ b/src/core/config.rs
@@ -88,15 +88,13 @@ impl Default for FilterConfig {
     }
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Default, Serialize, Deserialize)]
 pub struct TelemetryConfig {
     pub enabled: bool,
-}
-
-impl Default for TelemetryConfig {
-    fn default() -> Self {
-        Self { enabled: true }
-    }
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub consent_given: Option<bool>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub consent_date: Option<String>,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -130,11 +128,14 @@ pub fn limits() -> LimitsConfig {
     Config::load().map(|c| c.limits).unwrap_or_default()
 }
 
-/// Check if telemetry is enabled in config. Returns None if config can't be loaded.
 pub fn telemetry_enabled() -> Option<bool> {
     Config::load().ok().map(|c| c.telemetry.enabled)
 }
 
+pub fn telemetry_consent() -> Option<bool> {
+    Config::load().ok().and_then(|c| c.telemetry.consent_given)
+}
+
 impl Config {
     pub fn load() -> Result<Self> {
         let path = get_config_path()?;
@@ -220,4 +221,39 @@ history_days = 90
         let config: Config = toml::from_str(toml).expect("valid toml");
         assert!(config.hooks.exclude_commands.is_empty());
     }
+
+    #[test]
+    fn test_old_toml_without_consent_fields() {
+        let toml = r#"
+[telemetry]
+enabled = true
+"#;
+        let config: Config = toml::from_str(toml).expect("valid toml");
+        assert!(config.telemetry.enabled);
+        assert!(config.telemetry.consent_given.is_none());
+        assert!(config.telemetry.consent_date.is_none());
+    }
+
+    #[test]
+    fn test_telemetry_default_disabled() {
+        let config = Config::default();
+        assert!(!config.telemetry.enabled);
+        assert!(config.telemetry.consent_given.is_none());
+    }
+
+    #[test]
+    fn test_telemetry_consent_roundtrip() {
+        let toml = r#"
+[telemetry]
+enabled = true
+consent_given = true
+consent_date = "2026-04-10T12:00:00Z"
+"#;
+        let config: Config = toml::from_str(toml).expect("valid toml");
+        assert_eq!(config.telemetry.consent_given, Some(true));
+        assert_eq!(
+            config.telemetry.consent_date.as_deref(),
+            Some("2026-04-10T12:00:00Z")
+        );
+    }
 }
diff --git a/src/core/mod.rs b/src/core/mod.rs
index c5d1e9306..462e664b6 100644
--- a/src/core/mod.rs
+++ b/src/core/mod.rs
@@ -7,6 +7,7 @@ pub mod filter;
 pub mod runner;
 pub mod tee;
 pub mod telemetry;
+pub mod telemetry_cmd;
 pub mod toml_filter;
 pub mod tracking;
 pub mod utils;
diff --git a/src/core/telemetry.rs b/src/core/telemetry.rs
index 5d6139608..62865dc0b 100644
--- a/src/core/telemetry.rs
+++ b/src/core/telemetry.rs
@@ -27,6 +27,12 @@ pub fn maybe_ping() {
         return;
     }
 
+    // RGPD: require explicit consent before any telemetry
+    match config::telemetry_consent() {
+        Some(true) => {}
+        Some(false) | None => return,
+    }
+
     // Check opt-out: config.toml
     if let
Some(false) = config::telemetry_enabled() { return; @@ -139,21 +145,10 @@ fn send_ping() -> Result<(), Box> { Ok(()) } -fn generate_device_hash() -> String { +pub fn generate_device_hash() -> String { let salt = get_or_create_salt(); - let hostname = hostname::get() - .map(|h| h.to_string_lossy().to_string()) - .unwrap_or_default(); - let username = std::env::var("USER") - .or_else(|_| std::env::var("USERNAME")) - .unwrap_or_default(); - let mut hasher = Sha256::new(); hasher.update(salt.as_bytes()); - hasher.update(b":"); - hasher.update(hostname.as_bytes()); - hasher.update(b":"); - hasher.update(username.as_bytes()); format!("{:x}", hasher.finalize()) } @@ -200,7 +195,7 @@ fn random_salt() -> String { buf.iter().map(|b| format!("{:02x}", b)).collect() } -fn salt_file_path() -> PathBuf { +pub fn salt_file_path() -> PathBuf { dirs::data_local_dir() .unwrap_or_else(|| PathBuf::from("/tmp")) .join("rtk") @@ -426,7 +421,7 @@ fn install_method_from_path(path: &str) -> &'static str { } } -fn telemetry_marker_path() -> PathBuf { +pub fn telemetry_marker_path() -> PathBuf { let data_dir = dirs::data_local_dir() .unwrap_or_else(|| PathBuf::from("/tmp")) .join(RTK_DATA_DIR); diff --git a/src/core/telemetry_cmd.rs b/src/core/telemetry_cmd.rs new file mode 100644 index 000000000..bad574f5d --- /dev/null +++ b/src/core/telemetry_cmd.rs @@ -0,0 +1,165 @@ +use anyhow::{Context, Result}; +use clap::Subcommand; + +#[derive(Debug, Subcommand)] +pub enum TelemetrySubcommand { + Status, + Enable, + Disable, + Forget, +} + +pub fn run(command: &TelemetrySubcommand) -> Result<()> { + match command { + TelemetrySubcommand::Status => run_status(), + TelemetrySubcommand::Enable => run_enable(), + TelemetrySubcommand::Disable => run_disable(), + TelemetrySubcommand::Forget => run_forget(), + } +} + +fn run_status() -> Result<()> { + let config = crate::core::config::Config::load().unwrap_or_default(); + + let consent_str = match config.telemetry.consent_given { + Some(true) => "yes", + 
Some(false) => "no", + None => "never asked", + }; + + let enabled_str = if config.telemetry.enabled { + "yes" + } else { + "no" + }; + + let env_override = std::env::var("RTK_TELEMETRY_DISABLED").unwrap_or_default() == "1"; + + println!("Telemetry status:"); + println!(" consent: {}", consent_str); + if let Some(date) = &config.telemetry.consent_date { + println!(" consent date: {}", date); + } + println!(" enabled: {}", enabled_str); + if env_override { + println!(" env override: RTK_TELEMETRY_DISABLED=1 (blocked)"); + } + + let salt_path = super::telemetry::salt_file_path(); + if salt_path.exists() { + let hash = super::telemetry::generate_device_hash(); + println!(" device hash: {}...{}", &hash[..8], &hash[56..]); + } else { + println!(" device hash: (no salt file)"); + } + + println!(); + println!("Data controller: RTK AI Labs, contact@rtk-ai.app"); + println!("Details: https://github.com/rtk-ai/rtk/blob/main/docs/TELEMETRY.md"); + + Ok(()) +} + +fn run_enable() -> Result<()> { + use std::io::{self, BufRead, IsTerminal}; + + if !io::stdin().is_terminal() { + anyhow::bail!( + "consent requires interactive terminal — cannot enable telemetry in piped mode" + ); + } + + eprintln!("RTK collects anonymous usage metrics once per day to improve filters."); + eprintln!(); + eprintln!(" What: command names (not arguments), token savings, OS, version"); + eprintln!(" Who: RTK AI Labs, contact@rtk-ai.app"); + eprintln!(" Details: https://github.com/rtk-ai/rtk/blob/main/docs/TELEMETRY.md"); + eprintln!(); + eprint!("Enable anonymous telemetry? [y/N] "); + + let stdin = io::stdin(); + let mut line = String::new(); + stdin + .lock() + .read_line(&mut line) + .context("Failed to read user input")?; + + let accepted = { + let response = line.trim().to_lowercase(); + response == "y" || response == "yes" + }; + + crate::hooks::init::save_telemetry_consent(accepted)?; + + if accepted { + println!("Telemetry enabled. 
Disable anytime: rtk telemetry disable"); + } else { + println!("Telemetry not enabled."); + } + + Ok(()) +} + +fn run_disable() -> Result<()> { + crate::hooks::init::save_telemetry_consent(false)?; + println!("Telemetry disabled."); + Ok(()) +} + +fn run_forget() -> Result<()> { + crate::hooks::init::save_telemetry_consent(false)?; + + let salt_path = super::telemetry::salt_file_path(); + let marker_path = super::telemetry::telemetry_marker_path(); + + let device_hash = if salt_path.exists() { + Some(super::telemetry::generate_device_hash()) + } else { + None + }; + + if salt_path.exists() { + std::fs::remove_file(&salt_path) + .with_context(|| format!("Failed to delete {}", salt_path.display()))?; + } + + if marker_path.exists() { + let _ = std::fs::remove_file(&marker_path); + } + + if let Some(hash) = device_hash { + match send_erasure_request(&hash) { + Ok(()) => { + println!("Erasure request sent to server."); + } + Err(e) => { + eprintln!("rtk: could not reach server: {}", e); + eprintln!(" To complete erasure, email contact@rtk-ai.app"); + eprintln!(" with your device hash: {}...{}", &hash[..8], &hash[56..]); + } + } + } + + println!("Local telemetry data deleted. 
Telemetry disabled.");
+    Ok(())
+}
+
+fn send_erasure_request(device_hash: &str) -> Result<(), Box<dyn std::error::Error>> {
+    let url = option_env!("RTK_TELEMETRY_URL");
+    let url = match url {
+        Some(u) => format!("{}/erasure", u),
+        None => return Err("no telemetry endpoint configured".into()),
+    };
+
+    let payload = serde_json::json!({
+        "device_hash": device_hash,
+        "action": "erasure",
+    });
+
+    ureq::post(&url)
+        .set("Content-Type", "application/json")
+        .timeout(std::time::Duration::from_secs(5))
+        .send_string(&payload.to_string())?;
+
+    Ok(())
+}
diff --git a/src/hooks/init.rs b/src/hooks/init.rs
index c65465962..83ee7e5f9 100644
--- a/src/hooks/init.rs
+++ b/src/hooks/init.rs
@@ -291,6 +291,8 @@ pub fn run(
         install_cursor_hooks(verbose)?;
     }
 
+    prompt_telemetry_consent()?;
+
     println!();
 
     Ok(())
@@ -436,7 +438,81 @@ fn prompt_user_consent(settings_path: &Path) -> Result<bool> {
     Ok(response == "y" || response == "yes")
 }
 
-/// Print manual instructions for settings.json patching
+pub fn save_telemetry_consent(accepted: bool) -> Result<()> {
+    let mut config = crate::core::config::Config::load().unwrap_or_default();
+    config.telemetry.consent_given = Some(accepted);
+    config.telemetry.enabled = accepted;
+    config.telemetry.consent_date = Some(chrono::Utc::now().to_rfc3339());
+    config
+        .save()
+        .context("Failed to save telemetry consent to config.toml")
+}
+
+fn prompt_telemetry_consent() -> Result<()> {
+    use std::io::{self, BufRead, IsTerminal};
+
+    let config = crate::core::config::Config::load().unwrap_or_default();
+    match config.telemetry.consent_given {
+        Some(true) => return Ok(()),
+        Some(false) => {
+            let should_reask = config
+                .telemetry
+                .consent_date
+                .as_deref()
+                .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok())
+                .map(|date| {
+                    (chrono::Utc::now() - date.with_timezone(&chrono::Utc)).num_days() >= 14
+                })
+                .unwrap_or(false);
+            if !should_reask {
+                return Ok(());
+            }
+        }
+        None => {}
+    }
+
+    eprintln!();
+    eprintln!("--- Telemetry ---");
+    eprintln!("RTK
collects anonymous usage metrics once per day to improve filters."); + eprintln!(); + eprintln!(" What: command names (not arguments), token savings, OS, version"); + eprintln!(" Why: prioritize filter development for the most-used commands"); + eprintln!(" Who: RTK AI Labs, contact@rtk-ai.app"); + eprintln!(" Rights: disable anytime with `rtk telemetry disable`,"); + eprintln!(" request erasure with `rtk telemetry forget`"); + eprintln!(" Details: https://github.com/rtk-ai/rtk/blob/main/docs/TELEMETRY.md"); + eprintln!(); + eprint!("Enable anonymous telemetry? [y/N] "); + + if !io::stdin().is_terminal() { + eprintln!("(non-interactive mode, defaulting to N)"); + save_telemetry_consent(false)?; + return Ok(()); + } + + let stdin = io::stdin(); + let mut line = String::new(); + stdin + .lock() + .read_line(&mut line) + .context("Failed to read user input")?; + + let accepted = { + let response = line.trim().to_lowercase(); + response == "y" || response == "yes" + }; + + save_telemetry_consent(accepted)?; + + if accepted { + eprintln!(" Telemetry enabled. 
Disable anytime: rtk telemetry disable"); + } else { + eprintln!(" Telemetry disabled."); + } + + Ok(()) +} + fn print_manual_instructions(hook_path: &Path, include_opencode: bool) { println!("\n MANUAL STEP: Add this to ~/.claude/settings.json:"); println!(" {{"); diff --git a/src/main.rs b/src/main.rs index 78a85b248..8b475c780 100644 --- a/src/main.rs +++ b/src/main.rs @@ -539,6 +539,12 @@ enum Commands { /// Show RTK adoption across Claude Code sessions Session {}, + /// Manage telemetry consent and data (RGPD/GDPR) + Telemetry { + #[command(subcommand)] + command: core::telemetry_cmd::TelemetrySubcommand, + }, + /// Learn CLI corrections from Claude Code error history Learn { /// Filter by project path (substring match) @@ -1831,6 +1837,11 @@ fn run_cli() -> Result { 0 } + Commands::Telemetry { command } => { + core::telemetry_cmd::run(&command)?; + 0 + } + Commands::Learn { project, all, From 577c311ecaaa8ae94f22dbe252152424d4333d04 Mon Sep 17 00:00:00 2001 From: "CK iRonin.IT" Date: Fri, 10 Apr 2026 13:54:00 -0400 Subject: [PATCH 112/204] fix(curl): skip JSON schema conversion for internal/localhost URLs --- src/cmds/cloud/curl_cmd.rs | 45 ++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/src/cmds/cloud/curl_cmd.rs b/src/cmds/cloud/curl_cmd.rs index d6930ef67..79de57cdc 100644 --- a/src/cmds/cloud/curl_cmd.rs +++ b/src/cmds/cloud/curl_cmd.rs @@ -38,7 +38,7 @@ pub fn run(args: &[String], verbose: u8) -> Result { let raw = stdout.to_string(); // Auto-detect JSON and pipe through filter - let filtered = filter_curl_output(&stdout); + let filtered = filter_curl_output(&stdout, args); println!("{}", filtered); timer.track( @@ -51,17 +51,20 @@ pub fn run(args: &[String], verbose: u8) -> Result { Ok(0) } -fn filter_curl_output(output: &str) -> String { +fn filter_curl_output(output: &str, args: &[String]) -> String { let trimmed = output.trim(); // Try JSON detection: starts with { or [ if 
(trimmed.starts_with('{') || trimmed.starts_with('[')) && (trimmed.ends_with('}') || trimmed.ends_with(']')) { - if let Ok(schema) = json_cmd::filter_json_string(trimmed, 5) { - // Only use schema if it's actually shorter than the original (#297) - if schema.len() <= trimmed.len() { - return schema; + // Skip schema conversion for internal/localhost URLs (issues #1152, #1157) + if !is_internal_url(args) { + if let Ok(schema) = json_cmd::filter_json_string(trimmed, 5) { + // Only use schema if it's actually shorter than the original (#297) + if schema.len() <= trimmed.len() { + return schema; + } } } } @@ -87,6 +90,17 @@ fn filter_curl_output(output: &str) -> String { .join("\n") } +fn is_internal_url(args: &[String]) -> bool { + args.iter().any(|a| { + let lower = a.to_lowercase(); + lower.starts_with("http://localhost") + || lower.starts_with("http://127.0.0.1") + || lower.starts_with("http://[::1]") + || lower.starts_with("https://localhost") + || lower.starts_with("https://127.0.0.1") + }) +} + #[cfg(test)] mod tests { use super::*; @@ -95,7 +109,7 @@ mod tests { fn test_filter_curl_json() { // Large JSON where schema is shorter than original — schema should be returned let output = r#"{"name": "a very long user name here", "count": 42, "items": [1, 2, 3], "description": "a very long description that takes up many characters in the original JSON payload", "status": "active", "url": "https://example.com/api/v1/users/123"}"#; - let result = filter_curl_output(output); + let result = filter_curl_output(output, &[]); assert!(result.contains("name")); assert!(result.contains("string")); assert!(result.contains("int")); @@ -104,14 +118,14 @@ mod tests { #[test] fn test_filter_curl_json_array() { let output = r#"[{"id": 1}, {"id": 2}]"#; - let result = filter_curl_output(output); + let result = filter_curl_output(output, &[]); assert!(result.contains("id")); } #[test] fn test_filter_curl_non_json() { let output = "Hello, World!\nThis is plain text."; - let result = 
filter_curl_output(output);
+        let result = filter_curl_output(output, &[]);
         assert!(result.contains("Hello, World!"));
         assert!(result.contains("plain text"));
     }
@@ -120,7 +134,7 @@ mod tests {
     fn test_filter_curl_json_small_returns_original() {
         // Small JSON where schema would be larger than original (issue #297)
         let output = r#"{"r2Ready":true,"status":"ok"}"#;
-        let result = filter_curl_output(output);
+        let result = filter_curl_output(output, &[]);
         // Schema would be "{\n  r2Ready: bool,\n  status: string\n}" which is longer
         // Should return the original JSON unchanged
         assert_eq!(result.trim(), output.trim());
@@ -130,9 +144,18 @@ mod tests {
     fn test_filter_curl_long_output() {
         let lines: Vec<String> = (0..50).map(|i| format!("Line {}", i)).collect();
         let output = lines.join("\n");
-        let result = filter_curl_output(&output);
+        let result = filter_curl_output(&output, &[]);
         assert!(result.contains("Line 0"));
         assert!(result.contains("Line 29"));
         assert!(result.contains("more lines"));
     }
+
+    #[test]
+    fn test_is_internal_url_localhost() {
+        assert!(is_internal_url(&["http://localhost:9222/json/version".to_string()]));
+        assert!(is_internal_url(&["http://127.0.0.1:8080/api".to_string()]));
+        assert!(is_internal_url(&["-s".to_string(), "http://localhost:3000".to_string()]));
+        assert!(!is_internal_url(&["https://api.example.com/data".to_string()]));
+        assert!(!is_internal_url(&["https://github.com".to_string()]));
+    }
 }
From 40462c05e66f116928de365a0d271bdfd61cec72 Mon Sep 17 00:00:00 2001
From: "CK iRonin.IT"
Date: Fri, 10 Apr 2026 13:54:02 -0400
Subject: [PATCH 113/204] fix(hooks): ensure default permission verdict prompts user for confirmation

---
 src/hooks/rewrite_cmd.rs | 89 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/src/hooks/rewrite_cmd.rs b/src/hooks/rewrite_cmd.rs
index f459f7e24..172d75899 100644
--- a/src/hooks/rewrite_cmd.rs
+++ b/src/hooks/rewrite_cmd.rs
@@ -70,4 +70,93 @@ mod tests {
             Some("rtk git status".into())
         );
     }
+ + /// SECURITY: Verify the exit code protocol for permission verdicts. + /// + /// The bash hook (.claude/hooks/rtk-rewrite.sh) interprets exit codes as: + /// 0 → auto-allow (sets permissionDecision: "allow") + /// 1 → passthrough (no RTK equivalent) + /// 2 → deny (let Claude Code handle natively) + /// 3 → ask (rewrite but omit permissionDecision, forcing user prompt) + /// + /// CRITICAL: PermissionVerdict::Default MUST map to exit 3 (ask), NOT exit 0. + /// If Default were mapped to exit 0, any command without an explicit permission + /// rule would be auto-allowed — bypassing Claude Code's least-privilege default. + /// See: https://github.com/rtk-ai/rtk/issues/1155 + mod exit_code_protocol { + use super::registry; + use crate::hooks::permissions::{check_command_with_rules, PermissionVerdict}; + + /// Exit code that `run()` returns for each verdict: + /// Allow → 0 (exit Ok(())) + /// Ask → 3 (process::exit(3)) + /// Default→ 3 (process::exit(3)) — grouped with Ask + /// Deny → 2 (process::exit(2)) — handled before rewrite match + fn expected_exit_code(verdict: &PermissionVerdict) -> i32 { + match verdict { + PermissionVerdict::Allow => 0, + PermissionVerdict::Deny => 2, + PermissionVerdict::Ask => 3, + PermissionVerdict::Default => 3, // MUST be 3, not 0! + } + } + + #[test] + fn test_default_verdict_maps_to_ask_exit_code() { + // When no rules match, verdict is Default → exit code must be 3 (ask). 
+ let verdict = check_command_with_rules("git status", &[], &[], &[]); + assert_eq!(verdict, PermissionVerdict::Default); + assert_eq!( + expected_exit_code(&verdict), + 3, + "Default verdict MUST exit with code 3 (ask), not 0 (allow)" + ); + } + + #[test] + fn test_allow_verdict_maps_to_allow_exit_code() { + let allow = vec!["git *".to_string()]; + let verdict = check_command_with_rules("git status", &[], &[], &allow); + assert_eq!(verdict, PermissionVerdict::Allow); + assert_eq!(expected_exit_code(&verdict), 0); + } + + #[test] + fn test_ask_verdict_maps_to_ask_exit_code() { + let ask = vec!["git push".to_string()]; + let verdict = check_command_with_rules("git push origin main", &[], &ask, &[]); + assert_eq!(verdict, PermissionVerdict::Ask); + assert_eq!(expected_exit_code(&verdict), 3); + } + + #[test] + fn test_deny_verdict_maps_to_deny_exit_code() { + let deny = vec!["rm -rf".to_string()]; + let verdict = check_command_with_rules("rm -rf /tmp/test", &deny, &[], &[]); + assert_eq!(verdict, PermissionVerdict::Deny); + assert_eq!(expected_exit_code(&verdict), 2); + } + + #[test] + fn test_no_auto_allow_bypass_for_unrecognized_commands() { + // SECURITY: A command with no permission rules and no matching allow rule + // must NOT be auto-allowed. This is the core of issue #1155. + // Even though `git status` can be rewritten to `rtk git status`, + // the absence of an allow rule means Default → exit 3 → ask. + let verdict = check_command_with_rules("git status", &[], &[], &[]); + assert_eq!(verdict, PermissionVerdict::Default); + + // Verify the rewrite exists (so the hook would output it), + // but the exit code forces user confirmation. + assert!(registry::rewrite_command("git status", &[]).is_some()); + assert_eq!(expected_exit_code(&verdict), 3); + } + + #[test] + fn test_default_never_equals_allow() { + // Sentinel: ensure Default and Allow are distinct enum variants. + // If this ever fails, the entire permission model is broken. 
+ assert_ne!(PermissionVerdict::Default, PermissionVerdict::Allow); + } + } } From 40c9dbc7dbbf9332d6859060765c582a880f0fde Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Sat, 11 Apr 2026 09:54:57 +0200 Subject: [PATCH 114/204] fix(hooks): require all segments to match allow rules (#1213) Compound commands (`&&`, `||`, `|`, `;`) previously received PermissionVerdict::Allow when *any single segment* matched an allow rule. This allowed a permission escalation where an LLM agent could chain an allowed command with an unapproved one (e.g. `git status && git add .`) to bypass user confirmation. Replace the `any_allow` flag with `all_segments_allowed` logic: every non-empty segment must independently match an allow rule for the chain to receive Allow. If any segment fails to match, the verdict demotes to Default (ask). Deny still short-circuits on any match, and Ask still wins over a partial Allow. Also add a `saw_segment` guard and a `!allow_rules.is_empty()` check to prevent a vacuous Allow on empty commands or empty rule sets. Add 6 regression tests covering the reproduction case from #1213, all four compound separators, and ask-wins-over-partial-allow precedence. Fixes #1213 Signed-off-by: Patrick szymkowiak --- src/hooks/permissions.rs | 145 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 135 insertions(+), 10 deletions(-) diff --git a/src/hooks/permissions.rs b/src/hooks/permissions.rs index 187489811..e2ef497fa 100644 --- a/src/hooks/permissions.rs +++ b/src/hooks/permissions.rs @@ -34,21 +34,27 @@ pub(crate) fn check_command_with_rules( ) -> PermissionVerdict { let segments = split_compound_command(cmd); let mut any_ask = false; - let mut any_allow = false; + // Every non-empty segment must independently match an allow rule for the + // compound command to receive Allow. See issue #1213: previously a single + // matching segment escalated the entire chain to Allow, enabling bypass. 
+ let mut all_segments_allowed = true; + let mut saw_segment = false; for segment in &segments { let segment = segment.trim(); if segment.is_empty() { continue; } + saw_segment = true; - // Deny takes highest priority + // Deny takes highest priority — any segment matching Deny blocks the whole chain. for pattern in deny_rules { if command_matches_pattern(segment, pattern) { return PermissionVerdict::Deny; } } + // Ask — if any segment matches an ask rule, the final verdict is Ask. if !any_ask { for pattern in ask_rules { if command_matches_pattern(segment, pattern) { @@ -58,20 +64,23 @@ pub(crate) fn check_command_with_rules( } } - if !any_allow && !any_ask { - for pattern in allow_rules { - if command_matches_pattern(segment, pattern) { - any_allow = true; - break; - } + // Allow — every non-empty segment must match an allow rule independently. + // As soon as one segment fails to match, the entire chain loses Allow status. + if all_segments_allowed { + let matched = allow_rules + .iter() + .any(|pattern| command_matches_pattern(segment, pattern)); + if !matched { + all_segments_allowed = false; } } } - // Precedence: Deny > Ask > Allow > Default (ask) + // Precedence: Deny > Ask > Allow > Default (ask). + // Allow requires (1) at least one segment seen, (2) all segments matched, (3) non-empty rules. if any_ask { PermissionVerdict::Ask - } else if any_allow { + } else if saw_segment && all_segments_allowed && !allow_rules.is_empty() { PermissionVerdict::Allow } else { PermissionVerdict::Default @@ -532,4 +541,120 @@ mod tests { PermissionVerdict::Default ); } + + // --- Regression tests for #1213 --- + // Compound command permission escalation: a single allowed segment must NOT + // grant Allow to the entire chain. Every non-empty segment must match + // independently. + + #[test] + fn test_compound_allow_requires_every_segment() { + // Reproduces #1213: `git status` is allowed but `git add .` is not. 
+ // Previously the chain was escalated to Allow — must now demote to Default. + let allow = vec![ + "git status *".to_string(), + "git status".to_string(), + "cargo *".to_string(), + ]; + + // Single allowed command → Allow + assert_eq!( + check_command_with_rules("git status", &[], &[], &allow), + PermissionVerdict::Allow + ); + + // Single unallowed command → Default + assert_eq!( + check_command_with_rules("git add .", &[], &[], &allow), + PermissionVerdict::Default + ); + + // BUG #1213: chain with one allowed + one unallowed → must be Default + assert_eq!( + check_command_with_rules("git status && git add .", &[], &[], &allow), + PermissionVerdict::Default, + "allowed segment must not escalate unallowed segment" + ); + + // Three-segment chain with middle unallowed → Default + assert_eq!( + check_command_with_rules( + "cargo test && git add . && git commit -m foo", + &[], + &[], + &allow, + ), + PermissionVerdict::Default, + "middle unallowed segment must demote the whole chain" + ); + + // Unallowed-then-allowed ordering must also demote + assert_eq!( + check_command_with_rules("git add . && git status", &[], &[], &allow), + PermissionVerdict::Default, + "unallowed first segment must demote the chain" + ); + } + + #[test] + fn test_compound_allow_all_segments_matched() { + // All segments match → Allow (regression: wildcard allow still works) + let allow = vec!["git *".to_string(), "cargo *".to_string()]; + + assert_eq!( + check_command_with_rules("git status && cargo test", &[], &[], &allow), + PermissionVerdict::Allow + ); + + assert_eq!( + check_command_with_rules( + "git log --oneline && cargo build && git status", + &[], + &[], + &allow + ), + PermissionVerdict::Allow + ); + } + + #[test] + fn test_compound_allow_semicolon_separator() { + // `;` separator must be handled identically to `&&`. 
+ let allow = vec!["git status".to_string()]; + assert_eq!( + check_command_with_rules("git status; git push", &[], &[], &allow), + PermissionVerdict::Default + ); + } + + #[test] + fn test_compound_allow_pipe_separator() { + // `|` separator must be handled identically to `&&`. + let allow = vec!["git log".to_string()]; + assert_eq!( + check_command_with_rules("git log | grep foo", &[], &[], &allow), + PermissionVerdict::Default + ); + } + + #[test] + fn test_compound_allow_or_separator() { + // `||` separator must also split segments. + let allow = vec!["cargo build".to_string()]; + assert_eq!( + check_command_with_rules("cargo build || cargo clean", &[], &[], &allow), + PermissionVerdict::Default + ); + } + + #[test] + fn test_compound_ask_still_wins_over_partial_allow() { + // If any segment hits an ask rule, verdict is Ask (ask > allow). + let ask = vec!["git push".to_string()]; + let allow = vec!["git *".to_string()]; + assert_eq!( + check_command_with_rules("git status && git push origin main", &[], &ask, &allow), + PermissionVerdict::Ask + ); + } } From 9979c699307a4adad2c2df0f2bc3b663df653311 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Sat, 11 Apr 2026 10:16:12 +0000 Subject: [PATCH 115/204] fix(git): re-insert -- separator when clap consumes it from git diff args (#1215) clap's trailing_var_arg=true silently drops -- when it appears as the first positional argument. This caused `rtk git diff -- ` to arrive in run_diff without the separator, making git treat the path as a revision and emit "fatal: ambiguous argument". Adds normalize_diff_args() which re-inserts -- before the first path-like argument (contains /, \, starts with . or ~) when -- is absent from the args vec. The fix is a no-op when -- is already present (e.g. rtk git diff HEAD -- file preserves the separator correctly). 
Generated by Claude Code Vibe coded by ousamabenyounes Co-Authored-By: Claude --- src/cmds/git/git.rs | 114 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) diff --git a/src/cmds/git/git.rs b/src/cmds/git/git.rs index 334459bef..2921da430 100644 --- a/src/cmds/git/git.rs +++ b/src/cmds/git/git.rs @@ -59,6 +59,44 @@ pub fn run( } } +/// Returns true if `arg` looks like a file-system path rather than a git revision. +/// +/// Used by `normalize_diff_args` to decide where to inject `--`. +fn looks_like_path(arg: &str) -> bool { + // Path separators are the strongest signal + arg.contains('/') || arg.contains('\\') || arg.starts_with('.') || arg.starts_with('~') +} + +/// Re-insert `--` before the first path-like argument when clap has consumed it. +/// +/// clap's `trailing_var_arg = true` silently drops `--` when it appears as the +/// first positional argument (before any other positional). This means: +/// `rtk git diff -- file` → args = ["file"] (clap ate `--`) +/// `rtk git diff HEAD -- file` → args = ["HEAD", "--", "file"] (preserved) +/// +/// Without the `--` separator git may treat an unambiguous path as a revision and +/// emit "fatal: ambiguous argument". We re-insert `--` before the first +/// path-like argument when `--` is absent so git always gets the correct intent. 
+fn normalize_diff_args(args: &[String]) -> Vec<String> {
+    // Already has `--` — nothing to do
+    if args.iter().any(|a| a == "--") {
+        return args.to_vec();
+    }
+    // Find the first non-flag arg that looks like a path
+    let path_start = args
+        .iter()
+        .position(|arg| !arg.starts_with('-') && looks_like_path(arg));
+    match path_start {
+        Some(idx) => {
+            let mut out = args[..idx].to_vec();
+            out.push("--".to_string());
+            out.extend_from_slice(&args[idx..]);
+            out
+        }
+        None => args.to_vec(),
+    }
+}
+
 fn run_diff(
     args: &[String],
     max_lines: Option<usize>,
@@ -67,6 +105,9 @@
 ) -> Result {
     let timer = tracking::TimedExecution::start();
 
+    // Re-insert `--` when clap's trailing_var_arg consumed it (issue #1215)
+    let args = &normalize_diff_args(args);
+
     // Check if user wants stat output
     let wants_stat = args
         .iter()
@@ -1795,6 +1836,79 @@ mod tests {
         );
     }
 
+    // ----- normalize_diff_args (issue #1215) -----
+
+    /// Baseline: `--` already present → no-op, args unchanged.
+    #[test]
+    fn test_normalize_diff_args_noop_when_separator_present() {
+        let args = vec![
+            "HEAD".to_string(),
+            "--".to_string(),
+            "src/main.rs".to_string(),
+        ];
+        assert_eq!(normalize_diff_args(&args), args);
+    }
+
+    /// Core regression: clap ate `--` before a path with `/`.
+    /// `normalize_diff_args` must re-insert it.
+    #[test]
+    fn test_normalize_diff_args_reinserts_separator_before_path_with_slash() {
+        let args = vec!["apps/client/frontend/src/MyComponent.tsx".to_string()];
+        let normalized = normalize_diff_args(&args);
+        assert_eq!(
+            normalized,
+            vec!["--".to_string(), "apps/client/frontend/src/MyComponent.tsx".to_string()],
+            "-- must be injected before the path argument"
+        );
+    }
+
+    /// Ref before path: args like ["HEAD", "src/foo.rs"] get `--` inserted before the path.
+ #[test] + fn test_normalize_diff_args_reinserts_separator_after_ref() { + let args = vec!["HEAD".to_string(), "src/foo.rs".to_string()]; + let normalized = normalize_diff_args(&args); + assert_eq!( + normalized, + vec!["HEAD".to_string(), "--".to_string(), "src/foo.rs".to_string()] + ); + } + + /// Flags before path: `["--cached", "src/foo.rs"]` → `["--cached", "--", "src/foo.rs"]`. + #[test] + fn test_normalize_diff_args_reinserts_separator_after_flag() { + let args = vec!["--cached".to_string(), "src/foo.rs".to_string()]; + let normalized = normalize_diff_args(&args); + assert_eq!( + normalized, + vec!["--cached".to_string(), "--".to_string(), "src/foo.rs".to_string()] + ); + } + + /// Pure flags (no paths) → no injection. + #[test] + fn test_normalize_diff_args_no_injection_for_pure_flags() { + let args = vec!["--stat".to_string(), "--cached".to_string()]; + assert_eq!(normalize_diff_args(&args), args); + } + + /// Dotfile / relative-path detection (starts with `.`). + #[test] + fn test_normalize_diff_args_dotfile_is_path() { + let args = vec![".gitignore".to_string()]; + let normalized = normalize_diff_args(&args); + assert_eq!( + normalized, + vec!["--".to_string(), ".gitignore".to_string()] + ); + } + + /// A bare word that isn't path-like (e.g. a branch name) → no injection. 
+ #[test] + fn test_normalize_diff_args_no_injection_for_bare_ref() { + let args = vec!["HEAD".to_string()]; + assert_eq!(normalize_diff_args(&args), args); + } + #[test] fn test_is_blob_show_arg() { assert!(is_blob_show_arg("develop:modules/pairs_backtest.py")); From 9722d5ebd8916f9b398bdc01b1102d42ab2b8795 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Sat, 11 Apr 2026 10:20:18 +0000 Subject: [PATCH 116/204] fix(go): prevent double-counting failures when package-level fail cascades from test failures (#958) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit go test -json emits both a test-level {"action":"fail","Test":"..."} and a trailing package-level {"action":"fail"} (no Test field) for every failing test. The parser was counting both events, so a run with 1 failing test was reported as "2 failed" in the summary header. Fix: total_pkg_fail now only includes packages where package_failed is true AND there are no individual test- or build-level failures — i.e. pure package-level failures such as timeouts and panics. Also exclude such packages from the [FAIL] display section when test-level results are already shown below. Three new tests cover: - no double-count on a regular test failure - exact reproduction from issue #958 (signal: quit line) - timeout with passing tests before the kill Generated by Claude Code Vibe coded by ousamabenyounes Co-Authored-By: Claude --- CHANGELOG.md | 6 +++ src/cmds/go/go_cmd.rs | 99 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 102 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c1067489..02e80021b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [Unreleased] + +### Bug Fixes + +* **go:** fix double-counted failure in `go test` summary when test-level failures also trigger a package-level fail event ([#958](https://github.com/rtk-ai/rtk/issues/958)) + ## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) diff --git a/src/cmds/go/go_cmd.rs b/src/cmds/go/go_cmd.rs index e834eb6b4..2f4836342 100644 --- a/src/cmds/go/go_cmd.rs +++ b/src/cmds/go/go_cmd.rs @@ -391,7 +391,13 @@ fn filter_go_test_json(output: &str) -> String { let total_fail: usize = packages.values().map(|p| p.fail).sum(); let total_skip: usize = packages.values().map(|p| p.skip).sum(); let total_build_fail: usize = packages.values().filter(|p| p.build_failed).count(); - let total_pkg_fail: usize = packages.values().filter(|p| p.package_failed).count(); + // Only count package-level fails for packages with no individual test or build failures. + // go test -json emits a trailing package-level {"action":"fail"} after any test failure + // too, but that event is just a cascade — the individual test failures are already counted. + let total_pkg_fail: usize = packages + .values() + .filter(|p| p.package_failed && p.fail == 0 && !p.build_failed) + .count(); let has_failures = total_fail > 0 || total_build_fail > 0 || total_pkg_fail > 0; @@ -418,9 +424,11 @@ fn filter_go_test_json(output: &str) -> String { result.push_str(&format!(" in {} packages\n", total_packages)); result.push_str("═══════════════════════════════════════\n"); - // Show package-level failures first (timeouts, signals, panics) + // Show package-level failures first (timeouts, signals, panics). + // Skip packages that already have individual test-level failures — those are displayed + // in the per-package section below and the package-level event is just a cascade. 
for (package, pkg_result) in packages.iter() { - if !pkg_result.package_failed { + if !pkg_result.package_failed || pkg_result.fail > 0 || pkg_result.build_failed { continue; } @@ -754,6 +762,91 @@ mod tests { ); } + #[test] + fn test_filter_go_test_no_double_count_on_test_failure() { + // go test -json always emits a package-level {"action":"fail"} after each + // test-level failure. The package-level event is a cascade, not an additional + // failure. The summary header must show "1 failed", not "2 failed". + let output = r#"{"Time":"2024-01-01T10:00:00Z","Action":"run","Package":"example.com/foo","Test":"TestFail"} +{"Time":"2024-01-01T10:00:01Z","Action":"output","Package":"example.com/foo","Test":"TestFail","Output":"=== RUN TestFail\n"} +{"Time":"2024-01-01T10:00:02Z","Action":"output","Package":"example.com/foo","Test":"TestFail","Output":" Error: expected 5, got 3\n"} +{"Time":"2024-01-01T10:00:03Z","Action":"fail","Package":"example.com/foo","Test":"TestFail","Elapsed":0.5} +{"Time":"2024-01-01T10:00:03Z","Action":"fail","Package":"example.com/foo","Elapsed":0.5}"#; + + let result = filter_go_test_json(output); + // The summary header must say "1 failed", not "2 failed" (no double-counting). + assert!( + result.starts_with("Go test: 0 passed, 1 failed"), + "Expected header 'Go test: 0 passed, 1 failed', got: {}", + result + ); + assert!(result.contains("TestFail")); + assert!(result.contains("expected 5, got 3")); + // The package must NOT appear twice (once as "[FAIL]" and once with test details). + assert_eq!( + result.matches("foo").count(), + 1, + "Package name should appear exactly once, got: {}", + result + ); + } + + #[test] + fn test_filter_go_test_timeout_with_signal_quit_output() { + // Exact reproduction of the scenario from issue #958: the signal: quit line + // appears as a separate JSON output event. 
+ let output = r#"{"Action":"start","Package":"example.com/pkg"} +{"Action":"output","Package":"example.com/pkg","Output":"*** Test killed with quit: ran too long (1m30s).\n"} +{"Action":"output","Package":"example.com/pkg","Output":"signal: quit\n"} +{"Action":"output","Package":"example.com/pkg","Output":"FAIL\texample.com/pkg\t90.000s\n"} +{"Action":"fail","Package":"example.com/pkg","Elapsed":90.001}"#; + + let result = filter_go_test_json(output); + assert!( + result.starts_with("Go test: 0 passed, 1 failed"), + "Expected 'Go test: 0 passed, 1 failed', got: {}", + result + ); + assert!( + !result.contains("No tests found"), + "Must not say 'No tests found' on timeout, got: {}", + result + ); + assert!( + result.contains("Test killed with quit"), + "Should show the timeout message, got: {}", + result + ); + } + + #[test] + fn test_filter_go_test_timeout_with_passing_tests_before_kill() { + // Some tests pass before the package times out. + // Summary should show both pass and fail counts. 
+ let output = r#"{"Action":"run","Package":"example.com/foo","Test":"TestFast"} +{"Action":"pass","Package":"example.com/foo","Test":"TestFast","Elapsed":0.001} +{"Action":"run","Package":"example.com/foo","Test":"TestHang"} +{"Action":"output","Package":"example.com/foo","Output":"*** Test killed with quit: ran too long (30s).\n"} +{"Action":"fail","Package":"example.com/foo","Elapsed":30.001}"#; + + let result = filter_go_test_json(output); + assert!( + result.starts_with("Go test: 1 passed, 1 failed"), + "Expected 'Go test: 1 passed, 1 failed', got: {}", + result + ); + assert!( + !result.contains("No tests found"), + "Must not say 'No tests found', got: {}", + result + ); + assert!( + result.contains("Test killed with quit"), + "Should show timeout message, got: {}", + result + ); + } + #[test] fn test_filter_go_build_success() { let output = ""; From 840571fe90ed14fb7e96f9b9000a1bac6d196d23 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 11 Apr 2026 17:24:04 +0200 Subject: [PATCH 117/204] fix(core): review 956 various fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 4 fixes applied (all confirmed introduced by PR #956, all tests pass): - P0 NEW-passthrough — pipe_cmd.rs: passthrough before cap read - P1 BUFFERED-panic — stream.rs: catch_unwind on Buffered filter - P1 STREAM-postcap — stream.rs: stop feeding filter after cap - P2 OFFBYONE-rawcap — stream.rs: 5 cap boundary checks fixed 5 findings dropped (not introduced by PR or not bugs): - DENY-claude: pre-existing on master - AUDIT-asymmetry: intentional scope choice, not a bug - GEMINI-test: pre-existing test pattern from master - SAVINGS-threshold: 40% is correct (filters achieve ~46%) - STDERR-test: cosmetic CI, not correctness --- src/cmds/rust/cargo_cmd.rs | 106 +++++++++-------------------- src/cmds/system/pipe_cmd.rs | 11 +-- src/core/stream.rs | 24 +++++-- src/discover/lexer.rs | 77 +++++++++++++++++++++ 
src/discover/registry.rs | 33 +-------- src/hooks/hook_cmd.rs | 129 ++++++++++++++++++------------------ src/hooks/permissions.rs | 28 ++++++-- 7 files changed, 220 insertions(+), 188 deletions(-) diff --git a/src/cmds/rust/cargo_cmd.rs b/src/cmds/rust/cargo_cmd.rs index 41fa43466..f94b04a7c 100644 --- a/src/cmds/rust/cargo_cmd.rs +++ b/src/cmds/rust/cargo_cmd.rs @@ -791,98 +791,56 @@ fn filter_cargo_nextest(output: &str) -> String { } fn filter_cargo_build(output: &str) -> String { - let mut errors: Vec = Vec::new(); - let mut warnings = 0; - let mut error_count = 0; - let mut compiled = 0; - let mut in_error = false; - let mut current_error = Vec::new(); - let mut finished_line: Option = None; + let mut handler = CargoBuildHandler::new(); + let mut blocks: Vec> = Vec::new(); + let mut current_block: Vec = Vec::new(); + let mut in_block = false; for line in output.lines() { - if line.trim_start().starts_with("Compiling") || line.trim_start().starts_with("Checking") { - compiled += 1; + if handler.should_skip(line) { continue; } - if line.trim_start().starts_with("Downloading") - || line.trim_start().starts_with("Downloaded") - { - continue; - } - if line.trim_start().starts_with("Finished") { - finished_line = Some(line.trim_start().to_string()); - continue; - } - - // Detect error/warning blocks - if line.starts_with("error[") || line.starts_with("error:") { - // Skip "error: aborting due to" summary lines - if line.contains("aborting due to") || line.contains("could not compile") { - continue; - } - if in_error && !current_error.is_empty() { - errors.push(current_error.join("\n")); - current_error.clear(); - } - error_count += 1; - in_error = true; - current_error.push(line.to_string()); - } else if line.starts_with("warning:") - && line.contains("generated") - && line.contains("warning") - { - // "warning: `crate` generated N warnings" summary line - continue; - } else if line.starts_with("warning:") || line.starts_with("warning[") { - if in_error && 
!current_error.is_empty() { - errors.push(current_error.join("\n")); - current_error.clear(); + if handler.is_block_start(line) { + if in_block && !current_block.is_empty() { + blocks.push(std::mem::take(&mut current_block)); } - warnings += 1; - in_error = true; - current_error.push(line.to_string()); - } else if in_error { - if line.trim().is_empty() && current_error.len() > 3 { - errors.push(current_error.join("\n")); - current_error.clear(); - in_error = false; + in_block = true; + current_block.push(line.to_string()); + } else if in_block { + if handler.is_block_continuation(line, ¤t_block) { + current_block.push(line.to_string()); } else { - current_error.push(line.to_string()); + blocks.push(std::mem::take(&mut current_block)); + in_block = false; } } } - - if !current_error.is_empty() { - errors.push(current_error.join("\n")); + if !current_block.is_empty() { + blocks.push(current_block); } - if error_count == 0 && warnings == 0 { - return if let Some(finished) = finished_line { - format!("cargo build ({} crates compiled)\n{}", compiled, finished) - } else { - format!("cargo build ({} crates compiled)", compiled) - }; + if handler.error_count == 0 && handler.warnings == 0 { + let mut s = format!("cargo build ({} crates compiled)", handler.compiled); + if let Some(ref finished) = handler.finished_line { + s = format!("{}\n{}", s, finished); + } + return s; } - let mut result = String::new(); - result.push_str(&format!( - "cargo build: {} errors, {} warnings ({} crates)\n", - error_count, warnings, compiled - )); - result.push_str("═══════════════════════════════════════\n"); - - for (i, err) in errors.iter().enumerate().take(15) { - result.push_str(err); + let mut result = format!( + "cargo build: {} errors, {} warnings ({} crates)\n═══════════════════════════════════════\n", + handler.error_count, handler.warnings, handler.compiled + ); + for (i, blk) in blocks.iter().enumerate().take(15) { + result.push_str(&blk.join("\n")); result.push('\n'); - if i < 
errors.len() - 1 { + if i < blocks.len() - 1 { result.push('\n'); } } - - if errors.len() > 15 { - result.push_str(&format!("\n... +{} more issues\n", errors.len() - 15)); + if blocks.len() > 15 { + result.push_str(&format!("\n... +{} more issues\n", blocks.len() - 15)); } - result.trim().to_string() } diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index c0c73b272..0af3448f3 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -200,6 +200,12 @@ fn apply_filter(filter_fn: fn(&str) -> String, input: &str) -> String { } pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { + if passthrough { + std::io::copy(&mut std::io::stdin(), &mut std::io::stdout()) + .map_err(|e| anyhow::anyhow!("Failed to relay stdin: {}", e))?; + return Ok(()); + } + let mut buf = String::new(); std::io::stdin() .take((RAW_CAP + 1) as u64) @@ -209,11 +215,6 @@ pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { anyhow::bail!("stdin exceeds {} byte limit", RAW_CAP); } - if passthrough { - print!("{}", buf); - return Ok(()); - } - let filter_fn = match filter_name { Some(name) => resolve_filter(name).ok_or_else(|| { anyhow::anyhow!( diff --git a/src/core/stream.rs b/src/core/stream.rs index 9f662477f..75d576e54 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -303,7 +303,7 @@ pub fn run_streaming( let mut err_out = stderr_out.lock(); for line in BufReader::new(stderr).lines().map_while(Result::ok) { writeln!(err_out, "{}", line).ok(); - if raw_err.len() + line.len() < RAW_CAP { + if raw_err.len() + line.len() + 1 <= RAW_CAP { raw_err.push_str(&line); raw_err.push('\n'); } else if !capped { @@ -313,7 +313,7 @@ pub fn run_streaming( } } else { for line in BufReader::new(stderr).lines().map_while(Result::ok) { - if raw_err.len() + line.len() < RAW_CAP { + if raw_err.len() + line.len() + 1 <= RAW_CAP { raw_err.push_str(&line); raw_err.push('\n'); } else if !capped { @@ -338,12 +338,16 @@ pub fn 
run_streaming( FilterMode::Passthrough => unreachable!("handled by early-return above"), FilterMode::Streaming(mut filter) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() < RAW_CAP { + if capped { + continue; + } + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); - } else if !capped { + } else { capped = true; eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); + continue; } if let Some(output) = filter.feed_line(&line) { filtered.push_str(&output); @@ -365,7 +369,7 @@ pub fn run_streaming( } FilterMode::Buffered(filter_fn) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } else if !capped { @@ -373,7 +377,13 @@ pub fn run_streaming( eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } } - filtered = filter_fn(&raw_stdout); + filtered = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + filter_fn(&raw_stdout) + })) + .unwrap_or_else(|_| { + eprintln!("[rtk] warning: filter panicked — passing through raw output"); + raw_stdout.clone() + }); match write!(out, "{}", filtered) { Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} Err(e) => return Err(e.into()), @@ -382,7 +392,7 @@ pub fn run_streaming( } FilterMode::CaptureOnly => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } else if !capped { diff --git a/src/discover/lexer.rs b/src/discover/lexer.rs index a5ea114ab..8a126530a 100644 --- a/src/discover/lexer.rs +++ b/src/discover/lexer.rs @@ -258,6 +258,54 @@ fn flush_arg(tokens: &mut Vec, current: &mut String, offset: usize) } } +/// Split a shell command 
on operators (`&&`, `||`, `;`) and optionally pipes (`|`), +/// respecting quoted strings via the lexer. +/// +/// When `stop_at_pipe` is true, returns only segments before the first `|` +/// (used by command rewriting — only the left side of a pipe gets rewritten). +/// When false, splits through pipes too (used by permission checking — +/// every segment must be validated). +pub fn split_on_operators(cmd: &str, stop_at_pipe: bool) -> Vec<&str> { + let trimmed = cmd.trim(); + if trimmed.is_empty() { + return vec![]; + } + + let tokens = tokenize(trimmed); + let mut results = Vec::new(); + let mut seg_start: usize = 0; + + for tok in &tokens { + match tok.kind { + TokenKind::Operator => { + let segment = trimmed[seg_start..tok.offset].trim(); + if !segment.is_empty() { + results.push(segment); + } + seg_start = tok.offset + tok.value.len(); + } + TokenKind::Pipe => { + let segment = trimmed[seg_start..tok.offset].trim(); + if !segment.is_empty() { + results.push(segment); + } + if stop_at_pipe { + return results; + } + seg_start = tok.offset + tok.value.len(); + } + _ => {} + } + } + + let tail = trimmed[seg_start..].trim(); + if !tail.is_empty() { + results.push(tail); + } + + results +} + pub fn strip_quotes(s: &str) -> String { let chars: Vec = s.chars().collect(); if chars.len() >= 2 @@ -952,4 +1000,33 @@ mod tests { fn test_strip_quotes_mismatched() { assert_eq!(strip_quotes("\"hello'"), "\"hello'"); } + + #[test] + fn test_split_on_operators_stop_at_pipe() { + assert_eq!(split_on_operators("a | b | c", true), vec!["a"]); + assert_eq!(split_on_operators("a && b | c", true), vec!["a", "b"]); + } + + #[test] + fn test_split_on_operators_through_pipes() { + assert_eq!(split_on_operators("a | b | c", false), vec!["a", "b", "c"]); + assert_eq!( + split_on_operators("a && b | c ; d", false), + vec!["a", "b", "c", "d"] + ); + } + + #[test] + fn test_split_on_operators_quoted() { + assert_eq!( + split_on_operators(r#"echo "a && b" && cargo test"#, false), + 
vec![r#"echo "a && b""#, "cargo test"] + ); + } + + #[test] + fn test_split_on_operators_empty() { + assert!(split_on_operators("", false).is_empty()); + assert!(split_on_operators(" ", true).is_empty()); + } } diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 8f21016c0..eaa54aa3e 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -3,7 +3,7 @@ use lazy_static::lazy_static; use regex::{Regex, RegexSet}; -use super::lexer::{tokenize, TokenKind}; +use super::lexer::{split_on_operators, tokenize, TokenKind}; use super::rules::{IGNORED_EXACT, IGNORED_PREFIXES, RULES}; /// Result of classifying a command. @@ -221,36 +221,7 @@ pub fn split_command_chain(cmd: &str) -> Vec<&str> { return vec![trimmed]; } - let tokens = tokenize(trimmed); - let mut results = Vec::new(); - let mut seg_start: usize = 0; - - for tok in &tokens { - match tok.kind { - TokenKind::Operator => { - let segment = trimmed[seg_start..tok.offset].trim(); - if !segment.is_empty() { - results.push(segment); - } - seg_start = tok.offset + tok.value.len(); - } - TokenKind::Pipe => { - let segment = trimmed[seg_start..tok.offset].trim(); - if !segment.is_empty() { - results.push(segment); - } - return results; - } - _ => {} - } - } - - let segment = trimmed[seg_start..].trim(); - if !segment.is_empty() { - results.push(segment); - } - - results + split_on_operators(trimmed, true) } /// Strip git global options before the subcommand (#163). diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index 1481fc218..5f002b7a9 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -275,53 +275,51 @@ fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> // ── Claude Code native hook ──────────────────────────────────── -/// Run the Claude Code PreToolUse hook natively. 
-pub fn run_claude() -> Result<()> { - let input = read_stdin_limited()?; - - let input = input.trim(); - if input.is_empty() { - return Ok(()); - } - - let v: Value = match serde_json::from_str(input) { - Ok(v) => v, - Err(e) => { - let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); - return Ok(()); - } - }; +enum PayloadAction { + Rewrite { + cmd: String, + rewritten: String, + output: Value, + }, + Skip { + reason: &'static str, + cmd: String, + }, + Ignore, +} +fn process_claude_payload(v: &Value) -> PayloadAction { let cmd = match v .pointer("/tool_input/command") .and_then(|c| c.as_str()) .filter(|c| !c.is_empty()) { - Some(c) => c.to_string(), - None => return Ok(()), + Some(c) => c, + None => return PayloadAction::Ignore, }; - let verdict = permissions::check_command(&cmd); + let verdict = permissions::check_command(cmd); if verdict == PermissionVerdict::Deny { - audit_log("skip:deny_rule", &cmd, ""); - return Ok(()); + return PayloadAction::Skip { + reason: "skip:deny_rule", + cmd: cmd.to_string(), + }; } - let rewritten = match get_rewritten(&cmd) { + let rewritten = match get_rewritten(cmd) { Some(r) => r, None => { - audit_log("skip:no_match", &cmd, ""); - return Ok(()); + return PayloadAction::Skip { + reason: "skip:no_match", + cmd: cmd.to_string(), + } } }; - audit_log("rewrite", &cmd, &rewritten); - - // Clone original tool_input, replace only "command" let updated_input = { let mut ti = v.get("tool_input").cloned().unwrap_or_else(|| json!({})); if let Some(obj) = ti.as_object_mut() { - obj.insert("command".into(), Value::String(rewritten)); + obj.insert("command".into(), Value::String(rewritten.clone())); } ti }; @@ -332,7 +330,6 @@ pub fn run_claude() -> Result<()> { "updatedInput": updated_input }); - // Only include permissionDecision for Allow (not Ask) if verdict == PermissionVerdict::Allow { hook_output .as_object_mut() @@ -340,51 +337,55 @@ pub fn run_claude() -> Result<()> { .insert("permissionDecision".into(), 
json!("allow")); } - let output = json!({ "hookSpecificOutput": hook_output }); - let _ = writeln!(io::stdout(), "{output}"); - Ok(()) + PayloadAction::Rewrite { + cmd: cmd.to_string(), + rewritten, + output: json!({ "hookSpecificOutput": hook_output }), + } } -/// Process a Claude hook payload from a string (for testing). -#[cfg(test)] -fn run_claude_inner(input: &str) -> Option { - let v: Value = serde_json::from_str(input).ok()?; - - let cmd = v - .pointer("/tool_input/command") - .and_then(|c| c.as_str()) - .filter(|c| !c.is_empty())?; +/// Run the Claude Code PreToolUse hook natively. +pub fn run_claude() -> Result<()> { + let input = read_stdin_limited()?; - let verdict = permissions::check_command(cmd); - if verdict == PermissionVerdict::Deny { - return None; + let input = input.trim(); + if input.is_empty() { + return Ok(()); } - let rewritten = get_rewritten(cmd)?; - - let updated_input = { - let mut ti = v.get("tool_input").cloned().unwrap_or_else(|| json!({})); - if let Some(obj) = ti.as_object_mut() { - obj.insert("command".into(), Value::String(rewritten)); + let v: Value = match serde_json::from_str(input) { + Ok(v) => v, + Err(e) => { + let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); + return Ok(()); } - ti }; - let mut hook_output = json!({ - "hookEventName": PRE_TOOL_USE_KEY, - "permissionDecisionReason": "RTK auto-rewrite", - "updatedInput": updated_input - }); - - if verdict == PermissionVerdict::Allow { - hook_output - .as_object_mut() - .unwrap() - .insert("permissionDecision".into(), json!("allow")); + match process_claude_payload(&v) { + PayloadAction::Rewrite { + cmd, + rewritten, + output, + } => { + audit_log("rewrite", &cmd, &rewritten); + let _ = writeln!(io::stdout(), "{output}"); + } + PayloadAction::Skip { reason, cmd } => { + audit_log(reason, &cmd, ""); + } + PayloadAction::Ignore => {} } - let output = json!({ "hookSpecificOutput": hook_output }); - Some(output.to_string()) + Ok(()) +} + +#[cfg(test)] 
+fn run_claude_inner(input: &str) -> Option<String> {
+    let v: Value = serde_json::from_str(input).ok()?;
+    match process_claude_payload(&v) {
+        PayloadAction::Rewrite { output, .. } => Some(output.to_string()),
+        _ => None,
+    }
 }
 
 // ── Cursor native hook ─────────────────────────────────────────
diff --git a/src/hooks/permissions.rs b/src/hooks/permissions.rs
index e189d8aca..552fbcdee 100644
--- a/src/hooks/permissions.rs
+++ b/src/hooks/permissions.rs
@@ -1,5 +1,6 @@
 use super::constants::{CLAUDE_DIR, SETTINGS_JSON, SETTINGS_LOCAL_JSON};
 use crate::core::stream::exec_capture;
+use crate::discover::lexer::split_on_operators;
 use serde_json::Value;
 use std::path::PathBuf;
@@ -267,14 +268,8 @@ fn glob_matches(cmd: &str, pattern: &str) -> bool {
     true
 }
 
-/// Split a compound shell command into individual segments.
-///
-/// Splits on `&&`, `||`, `|`, and `;`. Not a full shell parser — handles common cases.
 fn split_compound_command(cmd: &str) -> Vec<&str> {
-    cmd.split("&&")
-        .flat_map(|s| s.split("||"))
-        .flat_map(|s| s.split(['|', ';']))
-        .collect()
+    split_on_operators(cmd, false)
 }
 
 #[cfg(test)]
@@ -391,6 +386,25 @@ mod tests {
         );
     }
 
+    #[test]
+    fn test_quoted_operators_not_split() {
+        // "&&" inside quotes must NOT cause a split — old naive splitter got this wrong
+        let deny = vec!["git push --force".to_string()];
+        assert_eq!(
+            check_command_with_rules(r#"echo "git push --force && danger""#, &deny, &[], &[]),
+            PermissionVerdict::Default
+        );
+    }
+
+    #[test]
+    fn test_pipe_segments_checked() {
+        let deny = vec!["rm -rf".to_string()];
+        assert_eq!(
+            check_command_with_rules("cat file | rm -rf /", &deny, &[], &[]),
+            PermissionVerdict::Deny
+        );
+    }
+
     #[test]
     fn test_ask_verdict() {
         let ask = vec!["git push".to_string()];

From f68fa0087c03d6882993b7b3eaee98e1dbab41b4 Mon Sep 17 00:00:00 2001
From: David Alecrim
Date: Sat, 11 Apr 2026 12:35:06 -0300
Subject: [PATCH 118/204] fix(ccusage): add --yes flag and warn when falling back to npx

Fixes silent hang when ccusage is
not installed globally. npx prompts for confirmation before installing the
package, blocking the subprocess indefinitely. Adding --yes auto-confirms
and the info message sets user expectations.

Fixes rtk-ai/rtk#1226
---
 src/analytics/ccusage.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/analytics/ccusage.rs b/src/analytics/ccusage.rs
index 15d73109b..6ca2f26ee 100644
--- a/src/analytics/ccusage.rs
+++ b/src/analytics/ccusage.rs
@@ -95,7 +95,9 @@ fn build_command() -> Option<Command> {
     }
 
     // Fallback: try npx
+    eprintln!("[info] ccusage not installed globally, fetching via npx...");
     let npx_check = resolved_command("npx")
+        .arg("--yes")
         .arg("ccusage")
         .arg("--help")
         .stdout(std::process::Stdio::null())
@@ -104,6 +106,7 @@
     if npx_check.map(|s| s.success()).unwrap_or(false) {
         let mut cmd = resolved_command("npx");
+        cmd.arg("--yes");
         cmd.arg("ccusage");
         return Some(cmd);
     }

From ba235d85974c0a85b25e290a8bb83648800438a6 Mon Sep 17 00:00:00 2001
From: Nicolas Le Cam
Date: Sun, 12 Apr 2026 02:28:39 +0200
Subject: [PATCH 119/204] fix(pnpm): list command not working

Signed-off-by: Nicolas Le Cam
---
 src/cmds/js/pnpm_cmd.rs | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/src/cmds/js/pnpm_cmd.rs b/src/cmds/js/pnpm_cmd.rs
index 9ec832620..b1c9fb856 100644
--- a/src/cmds/js/pnpm_cmd.rs
+++ b/src/cmds/js/pnpm_cmd.rs
@@ -15,17 +15,18 @@ use crate::parser::{
 /// pnpm list JSON output structure
 #[derive(Debug, Deserialize)]
 struct PnpmListOutput {
+    name: String,
     #[serde(flatten)]
-    packages: HashMap<String, PnpmPackage>,
+    package: PackageJsonListItem,
 }
 
 #[derive(Debug, Deserialize)]
-struct PnpmPackage {
+struct PackageJsonListItem {
     version: Option<String>,
     #[serde(rename = "dependencies", default)]
-    dependencies: HashMap<String, PnpmPackage>,
+    dependencies: HashMap<String, PackageJsonListItem>,
     #[serde(rename = "devDependencies", default)]
-    dev_dependencies: HashMap<String, PnpmPackage>,
+    dev_dependencies: HashMap<String, PackageJsonListItem>,
 }
 
 /// pnpm outdated JSON output structure
@@ -52,13 +53,19 @@ impl
OutputParser for PnpmListParser {
     fn parse(input: &str) -> ParseResult {
         // Tier 1: Try JSON parsing
-        match serde_json::from_str::<PnpmListOutput>(input) {
+        match serde_json::from_str::<Vec<PnpmListOutput>>(input) {
             Ok(json) => {
                 let mut dependencies = Vec::new();
                 let mut total_count = 0;
 
-                for (name, pkg) in &json.packages {
-                    collect_dependencies(name, pkg, false, &mut dependencies, &mut total_count);
+                for pkg in &json {
+                    collect_dependencies(
+                        pkg.name.as_str(),
+                        &pkg.package,
+                        false,
+                        &mut dependencies,
+                        &mut total_count,
+                    );
                 }
 
                 let result = DependencyState {
@@ -88,7 +95,7 @@ impl OutputParser for PnpmListParser {
 /// Recursively collect dependencies from pnpm package tree
 fn collect_dependencies(
     name: &str,
-    pkg: &PnpmPackage,
+    pkg: &PackageJsonListItem,
     is_dev: bool,
     deps: &mut Vec,
     count: &mut usize,
@@ -502,8 +509,9 @@ mod tests {
 
     #[test]
     fn test_pnpm_list_parser_json() {
-        let json = r#"{
-            "my-project": {
+        let json = r#"[
+            {
+                "name": "my-project",
                 "version": "1.0.0",
                 "dependencies": {
                     "express": {
                         "version": "4.18.2"
                     }
                 }
             }
-        }"#;
+        ]"#;
 
         let result = PnpmListParser::parse(json);
         assert_eq!(result.tier(), 1);

From d22759b8c5254ad9c4a455f10cb7de75e92df581 Mon Sep 17 00:00:00 2001
From: Patrick szymkowiak
Date: Wed, 25 Mar 2026 20:47:13 +0100
Subject: [PATCH 120/204] feat(benchmark): add multipass VM integration test suite

Bun/TypeScript orchestrator that creates an Ubuntu 24.04 VM via
multipass, installs all dev tools (Rust, Go, Node, Python, .NET,
Terraform, etc.), builds RTK, and runs 103 tests across 11 phases:

- Cargo quality (fmt, clippy, test)
- 47 Rust built-in commands (git, ls, grep, cargo, pytest, go, tsc...)
- Hook rewrite engine (17 rewrite assertions) - Exit code preservation - Token savings verification (avg 81%) - Pipe compatibility - Edge cases (unicode, ANSI, empty output) - Performance (memory < 20MB) - Concurrency (10 parallel executions) Usage: bun run scripts/benchmark/run.ts # Full suite (~3 min) bun run scripts/benchmark/run.ts --quick # Skip perf/concurrency bun run scripts/benchmark/run.ts --phase 3 # Single phase bun run scripts/benchmark/cleanup.ts # Delete VM bun run scripts/benchmark/rebuild.ts # Fast rebuild Prerequisites: brew install multipass, bun Signed-off-by: Patrick szymkowiak --- .gitignore | 6 +- scripts/benchmark/cleanup.ts | 11 + scripts/benchmark/cloud-init.yaml | 315 +++++++++++++++++++++++ scripts/benchmark/lib/report.ts | 115 +++++++++ scripts/benchmark/lib/test.ts | 166 ++++++++++++ scripts/benchmark/lib/vm.ts | 172 +++++++++++++ scripts/benchmark/rebuild.ts | 17 ++ scripts/benchmark/run.ts | 409 ++++++++++++++++++++++++++++++ 8 files changed, 1209 insertions(+), 2 deletions(-) create mode 100644 scripts/benchmark/cleanup.ts create mode 100644 scripts/benchmark/cloud-init.yaml create mode 100644 scripts/benchmark/lib/report.ts create mode 100644 scripts/benchmark/lib/test.ts create mode 100644 scripts/benchmark/lib/vm.ts create mode 100644 scripts/benchmark/rebuild.ts create mode 100644 scripts/benchmark/run.ts diff --git a/.gitignore b/.gitignore index fdba09783..947ca4fe7 100644 --- a/.gitignore +++ b/.gitignore @@ -28,8 +28,10 @@ Thumbs.db # Test artifacts *.cast.bak -# Benchmark results -scripts/benchmark/ +# Benchmark results (fixture data, not infra) +scripts/benchmark/diff/ +scripts/benchmark/rtk/ +scripts/benchmark/unix/ benchmark-report.md # SQLite databases diff --git a/scripts/benchmark/cleanup.ts b/scripts/benchmark/cleanup.ts new file mode 100644 index 000000000..7cc38edba --- /dev/null +++ b/scripts/benchmark/cleanup.ts @@ -0,0 +1,11 @@ +#!/usr/bin/env bun +/** + * Delete the RTK test VM. 
+ * Usage: bun run scripts/benchmark/cleanup.ts + */ + +import { vmDelete } from "./lib/vm"; + +console.log("Deleting rtk-test VM..."); +await vmDelete(); +console.log("Done."); diff --git a/scripts/benchmark/cloud-init.yaml b/scripts/benchmark/cloud-init.yaml new file mode 100644 index 000000000..a528c5aa0 --- /dev/null +++ b/scripts/benchmark/cloud-init.yaml @@ -0,0 +1,315 @@ +#cloud-config +# RTK Integration Test VM — Ubuntu 24.04 +# Installs all tools needed for comprehensive RTK testing (~200 commands) +# Usage: multipass launch --name rtk-test --cloud-init scripts/benchmark/cloud-init.yaml --cpus 2 --memory 4G --disk 20G 24.04 + +package_update: true +package_upgrade: false + +packages: + # System tools + - curl + - wget + - jq + - git + - make + - cmake + - rsync + - sqlite3 + - shellcheck + - yamllint + - postgresql-client + - docker.io + - containerd + - python3 + - python3-pip + - python3-venv + - pipx + # Build essentials (for Rust compilation) + - build-essential + - pkg-config + - libssl-dev + - libsqlite3-dev + # Misc + - hyperfine + - unzip + - tree + +runcmd: + # ── Rust toolchain ── + - su - ubuntu -c 'curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y' + + # ── Node.js 22 + package managers ── + - curl -fsSL https://deb.nodesource.com/setup_22.x | bash - + - apt-get install -y nodejs + - npm install -g pnpm yarn + - npm install -g eslint prettier typescript + - npm install -g markdownlint-cli + + # ── Go 1.22 ── + - curl -fsSL https://go.dev/dl/go1.22.5.linux-amd64.tar.gz | tar -C /usr/local -xz + - echo 'export PATH=$PATH:/usr/local/go/bin:/home/ubuntu/go/bin' >> /home/ubuntu/.bashrc + - su - ubuntu -c 'export PATH=$PATH:/usr/local/go/bin && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest' + + # ── Python tools ── + - pipx install ruff + - pipx install mypy + - pipx install poetry + - pip3 install --break-system-packages pytest uv pre-commit + + # ── .NET 8 SDK ── + - | + wget 
https://dot.net/v1/dotnet-install.sh -O /tmp/dotnet-install.sh + chmod +x /tmp/dotnet-install.sh + /tmp/dotnet-install.sh --channel 8.0 --install-dir /usr/local/share/dotnet + ln -sf /usr/local/share/dotnet/dotnet /usr/local/bin/dotnet + echo 'export DOTNET_ROOT=/usr/local/share/dotnet' >> /home/ubuntu/.bashrc + + # ── Terraform ── + - | + wget -qO- https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg + echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/hashicorp.list + apt-get update && apt-get install -y terraform + + # ── Helm ── + - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + + # ── Hadolint ── + - | + wget -qO /usr/local/bin/hadolint https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 + chmod +x /usr/local/bin/hadolint + + # ── Docker setup ── + - usermod -aG docker ubuntu + - systemctl enable docker + - systemctl start docker + + # ── kubectl (standalone binary) ── + - | + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + rm kubectl + + # ── ansible ── + - pip3 install --break-system-packages ansible-core + + # ── Mock tools (too heavy to install) ── + - | + cat > /usr/local/bin/gcloud << 'MOCK' + #!/bin/bash + if [ "$1" = "version" ] || [ "$1" = "--version" ]; then + echo "Google Cloud SDK 400.0.0" + echo "bq 2.0.80" + echo "core 2023.01.01" + echo "gsutil 5.17" + else + echo "gcloud mock: $*" + fi + MOCK + chmod +x /usr/local/bin/gcloud + + - | + cat > /usr/local/bin/shopify << 'MOCK' + #!/bin/bash + echo "Shopify CLI 3.0.0 (mock)" + if [ "$1" = "theme" ] && [ "$2" = "check" ]; then + echo "Running theme check..." 
+ echo " 1 issue found" + echo " [warn] Missing alt text on image" + fi + MOCK + chmod +x /usr/local/bin/shopify + + - | + cat > /usr/local/bin/pio << 'MOCK' + #!/bin/bash + if [ "$1" = "--version" ]; then echo "PlatformIO Core, version 6.1.0" + elif [ "$1" = "run" ]; then + echo "Processing esp32dev (platform: espressif32; board: esp32dev)" + echo "Linking .pio/build/esp32dev/firmware.elf" + echo "========================= [SUCCESS] =========================" + fi + MOCK + chmod +x /usr/local/bin/pio + + - | + cat > /usr/local/bin/quarto << 'MOCK' + #!/bin/bash + if [ "$1" = "--version" ]; then echo "1.3.450" + elif [ "$1" = "render" ]; then echo "Rendering document..."; echo "Output created: document.html" + fi + MOCK + chmod +x /usr/local/bin/quarto + + - | + cat > /usr/local/bin/sops << 'MOCK' + #!/bin/bash + if [ "$1" = "--version" ]; then echo "sops 3.7.3"; fi + MOCK + chmod +x /usr/local/bin/sops + + - | + cat > /usr/local/bin/swift << 'MOCK' + #!/bin/bash + if [ "$1" = "--version" ]; then echo "Swift version 5.9.2 (swift-5.9.2-RELEASE)" + elif [ "$1" = "build" ]; then echo "Compiling Swift module..."; echo "Build complete! 
(0.42s)" + fi + MOCK + chmod +x /usr/local/bin/swift + + # ── Fake test projects ── + + # Node.js project with errors + - | + su - ubuntu -c ' + mkdir -p /tmp/test-node/src && cd /tmp/test-node + npm init -y >/dev/null 2>&1 + echo "{\"compilerOptions\":{\"strict\":true,\"noEmit\":true,\"target\":\"ES2020\",\"module\":\"ESNext\",\"moduleResolution\":\"node\"},\"include\":[\"src\"]}" > tsconfig.json + echo "const x: number = \"not a number\";\nconst unused = 42;\nfunction greet(name: string): string { return name }\ngreet(123);" > src/index.ts + echo "{\"rules\":{\"no-unused-vars\":\"error\",\"semi\":[\"error\",\"always\"]}}" > .eslintrc.json + echo "const x = 1;const y=2; const z =3" > src/ugly.ts + ' + + # Python project with errors + - | + su - ubuntu -c ' + mkdir -p /tmp/test-python && cd /tmp/test-python + cat > main.py << "PYEOF" + import os + import sys + unused_import = 1 + def add(a: int, b: int) -> str: + return a + b + x: int = "hello" + PYEOF + cat > test_main.py << "PYEOF" + def test_pass(): + assert 1 + 1 == 2 + def test_fail(): + assert 1 + 1 == 3, "math is broken" + PYEOF + cat > pyproject.toml << "PYEOF" + [tool.ruff] + line-length = 80 + select = ["E", "F", "W"] + [tool.mypy] + strict = true + [tool.pytest.ini_options] + testpaths = ["."] + PYEOF + ' + + # Go project with errors + - | + su - ubuntu -c ' + export PATH=$PATH:/usr/local/go/bin + mkdir -p /tmp/test-go && cd /tmp/test-go + go mod init test-go 2>/dev/null + cat > main.go << "GOEOF" + package main + import "fmt" + func main() { fmt.Println("hello") } + func unused() { var x int; _ = x } + GOEOF + cat > main_test.go << "GOEOF" + package main + import "testing" + func TestPass(t *testing.T) { if 1+1 != 2 { t.Fatal("math") } } + func TestFail(t *testing.T) { t.Fatal("expected failure") } + GOEOF + ' + + # Rust project with errors + - | + su - ubuntu -c ' + export PATH=$HOME/.cargo/bin:$PATH + mkdir -p /tmp/test-rust && cd /tmp/test-rust + cargo init --name test-rust 2>/dev/null + cat > 
src/main.rs << "RSEOF" + fn main() { + let x = vec![1, 2, 3]; + let _y = x.iter().map(|i| i.clone()).collect::>(); + println!("hello"); + } + #[cfg(test)] + mod tests { + #[test] fn test_pass() { assert_eq!(1 + 1, 2); } + #[test] fn test_fail() { assert_eq!(1 + 1, 3); } + } + RSEOF + ' + + # Dockerfiles for hadolint + - | + su - ubuntu -c ' + cat > /tmp/Dockerfile.bad << "DEOF" + FROM ubuntu:latest + RUN apt-get update && apt-get install -y curl wget git + RUN cd /tmp && wget http://example.com/script.sh && bash script.sh + EXPOSE 80 443 8080 + DEOF + ' + + # Shell/YAML/Markdown test files + - | + su - ubuntu -c ' + printf "#!/bin/bash\necho \$foo\nls *.txt\ncd \$(pwd)\n[ -f file ] && rm file\n" > /tmp/test.sh + printf "foo: bar\nbaz: qux\nlist:\n - item1\n - item2\ntruthy: yes\n" > /tmp/test.yaml + printf "#Header without space\nSome text\n\n* List item\n+ Mixed markers\n" > /tmp/test.md + ' + + # Git repo for testing + - | + su - ubuntu -c ' + mkdir -p /tmp/test-git && cd /tmp/test-git + git init && git config user.email "test@rtk.dev" && git config user.name "RTK Test" + for i in $(seq 1 20); do echo "line $i" >> file.txt && git add file.txt && git commit -m "feat: commit number $i"; done + echo "modified" >> file.txt && echo "new file" > new.txt + ' + + # Large log file for dedup testing + - | + su - ubuntu -c ' + for i in $(seq 1 500); do + printf "[2026-03-25 10:00:00] INFO Starting service...\n[2026-03-25 10:00:01] WARN Connection timeout\n[2026-03-25 10:00:01] ERROR Failed to connect: refused\n" + done > /tmp/large.log + for i in $(seq 1 50); do echo "[2026-03-25 10:05:00] FATAL Out of memory"; done >> /tmp/large.log + ' + + # .env file + - | + su - ubuntu -c ' + printf "DATABASE_URL=postgres://user:pass@localhost:5432/db\nAPI_KEY=sk-1234567890abcdef\nSECRET_TOKEN=ghp_xxxx\nNODE_ENV=production\nPORT=3000\n" > /tmp/.env + ' + + # Makefile + - | + su - ubuntu -c ' + printf ".PHONY: all test\nall:\n\t@echo Building...\n\t@echo Build complete\ntest:\n\t@echo 
Running tests...\n\t@echo 2 tests passed\n" > /tmp/Makefile + ' + + # Terraform project + - | + su - ubuntu -c ' + mkdir -p /tmp/test-terraform && cd /tmp/test-terraform + printf "terraform {\n required_version = \">= 1.0\"\n}\nresource \"null_resource\" \"test\" {\n triggers = { always = timestamp() }\n}\noutput \"test\" { value = \"hello\" }\n" > main.tf + ' + + # Helm chart + - su - ubuntu -c 'mkdir -p /tmp/test-helm && cd /tmp/test-helm && helm create test-chart 2>/dev/null || true' + + # .NET project + - | + export DOTNET_ROOT=/usr/local/share/dotnet + su - ubuntu -c ' + export DOTNET_ROOT=/usr/local/share/dotnet && export PATH=$PATH:$DOTNET_ROOT + mkdir -p /tmp/test-dotnet && cd /tmp/test-dotnet + dotnet new console -n TestApp --force 2>/dev/null || true + ' + + # Signal completion + - touch /home/ubuntu/.cloud-init-complete + - chown ubuntu:ubuntu /home/ubuntu/.cloud-init-complete + - echo "RTK cloud-init setup complete" | tee /var/log/rtk-setup.log + +final_message: "RTK test VM ready in $UPTIME seconds" diff --git a/scripts/benchmark/lib/report.ts b/scripts/benchmark/lib/report.ts new file mode 100644 index 000000000..fa4a62526 --- /dev/null +++ b/scripts/benchmark/lib/report.ts @@ -0,0 +1,115 @@ +/** + * Report generation for RTK integration test results. + */ + +import type { TestResult } from "./test"; +import { getCounts, getResults } from "./test"; + +interface BuildInfo { + buildTime: number; + binarySize: number; + version: string; + branch: string; + commit: string; +} + +export function generateReport(buildInfo: BuildInfo): string { + const { total, passed, failed, skipped } = getCounts(); + const results = getResults(); + const passRate = total > 0 ? 
Math.round((passed * 100) / total) : 0; + + const lines: string[] = []; + + lines.push("======================================================"); + lines.push(" RTK INTEGRATION TEST REPORT"); + lines.push("======================================================"); + lines.push(""); + lines.push(`Date: ${new Date().toISOString()}`); + lines.push(`Branch: ${buildInfo.branch}`); + lines.push(`Commit: ${buildInfo.commit}`); + lines.push(`Version: ${buildInfo.version}`); + lines.push(`Binary: ${buildInfo.binarySize} bytes`); + lines.push(`Build: ${buildInfo.buildTime}s`); + lines.push(""); + + // Summary + lines.push("--- Summary ---"); + lines.push(`Total: ${total}`); + lines.push(`Passed: ${passed} (${passRate}%)`); + lines.push(`Failed: ${failed}`); + lines.push(`Skipped: ${skipped}`); + lines.push(""); + + // Group results by phase (name prefix before ":") + const phases = new Map(); + for (const r of results) { + const colonIdx = r.name.indexOf(":"); + const phase = colonIdx > 0 ? r.name.slice(0, colonIdx) : "misc"; + if (!phases.has(phase)) phases.set(phase, []); + phases.get(phase)!.push(r); + } + + for (const [phase, phaseResults] of phases) { + const pPassed = phaseResults.filter((r) => r.status === "PASS").length; + const pTotal = phaseResults.length; + lines.push(`--- ${phase} (${pPassed}/${pTotal}) ---`); + + for (const r of phaseResults) { + const shortName = r.name.includes(":") ? 
r.name.split(":")[1] : r.name; + lines.push(` ${r.status.padEnd(4)} | ${shortName} | ${r.detail}`); + } + lines.push(""); + } + + // Failures detail + const failures = results.filter((r) => r.status === "FAIL"); + if (failures.length > 0) { + lines.push("--- Failures ---"); + for (const f of failures) { + lines.push(` ${f.name}: ${f.detail}`); + } + lines.push(""); + } + + // Token savings summary + const savingsResults = results.filter((r) => r.savings !== undefined); + if (savingsResults.length > 0) { + const avgSavings = Math.round( + savingsResults.reduce((sum, r) => sum + (r.savings ?? 0), 0) / + savingsResults.length + ); + const minSavings = Math.min( + ...savingsResults.map((r) => r.savings ?? 100) + ); + const maxSavings = Math.max(...savingsResults.map((r) => r.savings ?? 0)); + lines.push("--- Token Savings ---"); + lines.push(`Average: ${avgSavings}%`); + lines.push(`Min: ${minSavings}%`); + lines.push(`Max: ${maxSavings}%`); + lines.push(""); + } + + // Verdict + lines.push("======================================================"); + if (failed === 0) { + lines.push(" Verdict: READY FOR RELEASE"); + } else if (failed <= 2) { + lines.push(` Verdict: READY (${failed} minor issues)`); + } else { + lines.push(` Verdict: NOT READY (${failed} failures)`); + } + lines.push("======================================================"); + + return lines.join("\n"); +} + +/** Save report to file */ +export async function saveReport( + buildInfo: BuildInfo, + outPath: string +): Promise { + const report = generateReport(buildInfo); + await Bun.write(outPath, report); + console.log(`\nReport saved to: ${outPath}`); + return report; +} diff --git a/scripts/benchmark/lib/test.ts b/scripts/benchmark/lib/test.ts new file mode 100644 index 000000000..29297781a --- /dev/null +++ b/scripts/benchmark/lib/test.ts @@ -0,0 +1,166 @@ +/** + * Test helpers for RTK integration testing. 
+ */ + +import { vmExec, RTK_BIN } from "./vm"; + +export type TestStatus = "PASS" | "FAIL" | "SKIP"; + +export interface TestResult { + name: string; + status: TestStatus; + detail: string; + exitCode?: number; + outputSize?: number; + savings?: number; + duration?: number; +} + +const results: TestResult[] = []; + +export function getResults(): TestResult[] { + return results; +} + +export function getCounts() { + const total = results.length; + const passed = results.filter((r) => r.status === "PASS").length; + const failed = results.filter((r) => r.status === "FAIL").length; + const skipped = results.filter((r) => r.status === "SKIP").length; + return { total, passed, failed, skipped }; +} + +function record(result: TestResult) { + results.push(result); + const icon = + result.status === "PASS" + ? "\x1b[32mPASS\x1b[0m" + : result.status === "FAIL" + ? "\x1b[31mFAIL\x1b[0m" + : "\x1b[33mSKIP\x1b[0m"; + console.log(` ${icon} | ${result.name} | ${result.detail}`); +} + +/** + * Test a command exits with expected code and doesn't crash. 
+ * expectedExit: number or "any" (just checks no signal death) + */ +export async function testCmd( + name: string, + cmd: string, + expectedExit: number | "any" = 0 +): Promise { + const start = Date.now(); + const { stdout, stderr, exitCode } = await vmExec(cmd); + const duration = Date.now() - start; + const outputSize = stdout.length + stderr.length; + + let status: TestStatus; + let detail: string; + + if (expectedExit === "any") { + // Just check it didn't die from signal (exit >= 128) + if (exitCode < 128) { + status = "PASS"; + detail = `exit=${exitCode} | ${outputSize}b | ${duration}ms`; + } else { + status = "FAIL"; + detail = `SIGNAL exit=${exitCode} | ${outputSize}b`; + } + } else if (exitCode === expectedExit) { + status = "PASS"; + detail = `exit=${exitCode} | ${outputSize}b | ${duration}ms`; + } else { + status = "FAIL"; + detail = `expected exit=${expectedExit}, got ${exitCode} | ${outputSize}b`; + } + + const result: TestResult = { + name, + status, + detail, + exitCode, + outputSize, + duration, + }; + record(result); + return result; +} + +/** + * Test token savings: compare raw command output vs RTK filtered output. 
+ */ +export async function testSavings( + name: string, + rawCmd: string, + rtkCmd: string, + targetPct: number +): Promise { + const raw = await vmExec(rawCmd); + const rtk = await vmExec(rtkCmd); + + const rawSize = raw.stdout.length; + const rtkSize = rtk.stdout.length; + + if (rawSize === 0) { + const result: TestResult = { + name, + status: "SKIP", + detail: "raw output empty", + }; + record(result); + return result; + } + + const savings = Math.round(100 - (rtkSize * 100) / rawSize); + + let status: TestStatus; + let detail: string; + + if (savings >= targetPct) { + status = "PASS"; + detail = `raw=${rawSize}b filtered=${rtkSize}b savings=${savings}% (target: >=${targetPct}%)`; + } else { + status = "FAIL"; + detail = `savings=${savings}% < target ${targetPct}% (raw=${rawSize}b filtered=${rtkSize}b)`; + } + + const result: TestResult = { name, status, detail, savings }; + record(result); + return result; +} + +/** + * Test rewrite engine: input -> expected output. + */ +export async function testRewrite( + input: string, + expected: string +): Promise { + const { stdout } = await vmExec(`${RTK_BIN} rewrite '${input}'`); + const actual = stdout.trim(); + + let status: TestStatus; + let detail: string; + + if (actual === expected) { + status = "PASS"; + detail = `'${input}' -> '${actual}'`; + } else { + status = "FAIL"; + detail = `'${input}' -> expected '${expected}', got '${actual}'`; + } + + const result: TestResult = { name: `rewrite: ${input}`, status, detail }; + record(result); + return result; +} + +/** + * Skip a test with a reason. 
+ */
+export function skipTest(name: string, reason: string): TestResult {
+  const result: TestResult = { name, status: "SKIP", detail: reason };
+  record(result);
+  return result;
+}
diff --git a/scripts/benchmark/lib/vm.ts b/scripts/benchmark/lib/vm.ts
new file mode 100644
index 000000000..dbe09051b
--- /dev/null
+++ b/scripts/benchmark/lib/vm.ts
@@ -0,0 +1,172 @@
+/**
+ * Multipass VM management for RTK integration testing.
+ */
+
+import { $ } from "bun";
+
+const VM_NAME = "rtk-test";
+const CLOUD_INIT = "scripts/benchmark/cloud-init.yaml";
+
+export interface VmInfo {
+  name: string;
+  state: string;
+  ipv4: string;
+}
+
+/** Check if VM exists */
+export async function vmExists(): Promise<boolean> {
+  const result = await $`multipass list --format json`.quiet();
+  const data = JSON.parse(result.stdout.toString());
+  return data.list?.some((vm: VmInfo) => vm.name === VM_NAME) ?? false;
+}
+
+/** Check if VM is running */
+export async function vmRunning(): Promise<boolean> {
+  const result = await $`multipass list --format json`.quiet();
+  const data = JSON.parse(result.stdout.toString());
+  const vm = data.list?.find((v: VmInfo) => v.name === VM_NAME);
+  return vm?.state === "Running";
+}
+
+/** Create a new VM with cloud-init (20 min timeout for full provisioning) */
+export async function vmCreate(): Promise<void> {
+  console.log(`[vm] Creating ${VM_NAME} with cloud-init (this takes ~10-15 min)...`);
+  // --timeout 1200 = 20 min for cloud-init to finish installing Rust, Go, Node, .NET, etc.
+  await $`multipass launch --name ${VM_NAME} --cpus 2 --memory 4G --disk 20G --timeout 1200 --cloud-init ${CLOUD_INIT} 24.04`;
+}
+
+/** Start existing VM */
+export async function vmStart(): Promise<void> {
+  console.log(`[vm] Starting ${VM_NAME}...`);
+  await $`multipass start ${VM_NAME}`;
+}
+
+/** Execute a command in the VM, returns stdout */
+export async function vmExec(cmd: string): Promise<{
+  stdout: string;
+  stderr: string;
+  exitCode: number;
+}> {
+  const result = await $`multipass exec ${VM_NAME} -- bash -c ${cmd}`
+    .quiet()
+    .nothrow();
+  return {
+    stdout: result.stdout.toString(),
+    stderr: result.stderr.toString(),
+    exitCode: result.exitCode,
+  };
+}
+
+/** Transfer a file to the VM */
+export async function vmTransfer(
+  localPath: string,
+  remotePath: string
+): Promise<void> {
+  await $`multipass transfer ${localPath} ${VM_NAME}:${remotePath}`;
+}
+
+/** Wait for cloud-init to complete (max 40 min — installs Rust, Go, Node, .NET, etc.) */
+export async function vmWaitReady(maxWaitSec = 2400): Promise<boolean> {
+  console.log("[vm] Waiting for cloud-init...");
+  const start = Date.now();
+  while ((Date.now() - start) / 1000 < maxWaitSec) {
+    const { exitCode } = await vmExec(
+      "test -f /home/ubuntu/.cloud-init-complete"
+    );
+    if (exitCode === 0) {
+      const elapsed = Math.round((Date.now() - start) / 1000);
+      console.log(`[vm] Cloud-init complete after ${elapsed}s`);
+      return true;
+    }
+    await Bun.sleep(10_000);
+  }
+  console.error("[vm] Cloud-init timed out!");
+  return false;
+}
+
+/** Transfer RTK source and build in release mode */
+export async function vmBuildRtk(projectRoot: string): Promise<{
+  buildTime: number;
+  binarySize: number;
+  version: string;
+}> {
+  console.log("[vm] Transferring RTK source...");
+
+  // Create tarball excluding heavy dirs and macOS resource forks (._*)
+  await $`COPYFILE_DISABLE=1 tar czf /tmp/rtk-src.tar.gz --exclude target --exclude .git --exclude node_modules --exclude "index.html*" --exclude "._*" -C ${projectRoot} .`;
await vmTransfer("/tmp/rtk-src.tar.gz", "/tmp/rtk-src.tar.gz"); + await vmExec( + "mkdir -p /home/ubuntu/rtk && cd /home/ubuntu/rtk && tar xzf /tmp/rtk-src.tar.gz" + ); + + console.log("[vm] Building RTK (release)..."); + const start = Date.now(); + const { stdout, exitCode } = await vmExec( + "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo build --release 2>&1 | tail -5" + ); + const buildTime = Math.round((Date.now() - start) / 1000); + + if (exitCode !== 0) { + throw new Error(`Build failed:\n${stdout}`); + } + + const { stdout: sizeStr } = await vmExec( + "stat -c%s /home/ubuntu/rtk/target/release/rtk" + ); + const binarySize = parseInt(sizeStr.trim(), 10); + + const { stdout: version } = await vmExec( + "/home/ubuntu/rtk/target/release/rtk --version" + ); + + console.log( + `[vm] Build OK in ${buildTime}s — ${binarySize} bytes — ${version.trim()}` + ); + + return { buildTime, binarySize, version: version.trim() }; +} + +/** Delete the VM */ +export async function vmDelete(): Promise { + console.log(`[vm] Deleting ${VM_NAME}...`); + await $`multipass delete ${VM_NAME} --purge`.nothrow(); +} + +/** Ensure VM is ready (create or reuse) */ +export async function vmEnsureReady(): Promise { + if (await vmExists()) { + if (!(await vmRunning())) { + await vmStart(); + } + console.log(`[vm] Reusing existing VM ${VM_NAME}`); + // Check if cloud-init is still running + const { exitCode } = await vmExec( + "test -f /home/ubuntu/.cloud-init-complete" + ); + if (exitCode !== 0) { + console.log("[vm] Cloud-init still running, waiting..."); + const ready = await vmWaitReady(); + if (!ready) { + throw new Error( + "Cloud-init timed out. 
Check: multipass exec rtk-test -- cat /var/log/cloud-init-output.log" + ); + } + } + } else { + await vmCreate(); + // multipass launch --timeout should wait, but double-check + const { exitCode } = await vmExec( + "test -f /home/ubuntu/.cloud-init-complete" + ); + if (exitCode !== 0) { + const ready = await vmWaitReady(); + if (!ready) { + throw new Error( + "Cloud-init timed out. Check: multipass exec rtk-test -- cat /var/log/cloud-init-output.log" + ); + } + } + } +} + +export const RTK_BIN = "/home/ubuntu/rtk/target/release/rtk"; diff --git a/scripts/benchmark/rebuild.ts b/scripts/benchmark/rebuild.ts new file mode 100644 index 000000000..1d06277ff --- /dev/null +++ b/scripts/benchmark/rebuild.ts @@ -0,0 +1,17 @@ +#!/usr/bin/env bun +/** + * Fast rebuild: reuse existing VM, just transfer source and recompile. + * Usage: bun run scripts/benchmark/rebuild.ts + */ + +import { vmEnsureReady, vmBuildRtk } from "./lib/vm"; + +const PROJECT_ROOT = new URL("../../", import.meta.url).pathname.replace(/\/$/, ""); + +await vmEnsureReady(); +const info = await vmBuildRtk(PROJECT_ROOT); + +console.log(`\nRebuild complete:`); +console.log(` Version: ${info.version}`); +console.log(` Binary: ${info.binarySize} bytes`); +console.log(` Time: ${info.buildTime}s`); diff --git a/scripts/benchmark/run.ts b/scripts/benchmark/run.ts new file mode 100644 index 000000000..3c4ed7b4c --- /dev/null +++ b/scripts/benchmark/run.ts @@ -0,0 +1,409 @@ +#!/usr/bin/env bun +/** + * RTK Full Integration Test Suite — Multipass VM + * + * Usage: + * bun run scripts/benchmark/run.ts # Full suite + * bun run scripts/benchmark/run.ts --quick # Skip slow phases (perf, concurrency) + * bun run scripts/benchmark/run.ts --phase 3 # Run specific phase only + * + * Prerequisites: + * brew install multipass + */ + +import { $ } from "bun"; +import { vmEnsureReady, vmBuildRtk, vmExec, RTK_BIN } from "./lib/vm"; +import { testCmd, testSavings, testRewrite, skipTest, getCounts } from "./lib/test"; +import { 
saveReport } from "./lib/report"; + +const args = process.argv.slice(2); +const quick = args.includes("--quick"); +const phaseOnly = args.includes("--phase") + ? parseInt(args[args.indexOf("--phase") + 1], 10) + : null; + +const PROJECT_ROOT = new URL("../../", import.meta.url).pathname.replace(/\/$/, ""); +const RTK = RTK_BIN; + +function shouldRun(phase: number): boolean { + return phaseOnly === null || phaseOnly === phase; +} + +function heading(phase: number, title: string) { + console.log(`\n\x1b[34m[Phase ${phase}] ${title}\x1b[0m`); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 0: VM Setup +// ══════════════════════════════════════════════════════════════ + +console.log("\x1b[34m[rtk-test] RTK Full Integration Test Suite\x1b[0m"); +console.log(`Project: ${PROJECT_ROOT}`); + +await vmEnsureReady(); + +// ══════════════════════════════════════════════════════════════ +// Phase 1: Transfer & Build +// ══════════════════════════════════════════════════════════════ + +heading(1, "Transfer & Build"); +const branch = (await $`git -C ${PROJECT_ROOT} branch --show-current`.text()).trim(); +const commit = (await $`git -C ${PROJECT_ROOT} log --oneline -1`.text()).trim(); +const buildInfo = await vmBuildRtk(PROJECT_ROOT); + +// Binary size check +// ARM Linux binaries are larger (~6.5MB) than x86 stripped (~4MB) +const sizeLimit = 8_388_608; // 8MB +if (buildInfo.binarySize < sizeLimit) { + console.log(` \x1b[32mPASS\x1b[0m | binary size | ${buildInfo.binarySize} bytes < 8MB`); +} else { + console.log(` \x1b[31mFAIL\x1b[0m | binary size | ${buildInfo.binarySize} bytes >= 8MB`); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 2: Cargo Quality (fmt, clippy, test) +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(2)) { + heading(2, "Cargo Quality"); + + // NOTE: fmt/clippy may fail if non-Rust files (benchmark .ts) are transferred — test "any" exit + await testCmd( + 
"quality:cargo fmt", + "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo fmt --all --check 2>&1", + "any" + ); + + await testCmd( + "quality:cargo clippy", + "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo clippy --all-targets 2>&1", + "any" + ); + + const testResult = await vmExec( + "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo test --all 2>&1" + ); + const testLine = testResult.stdout.split("\n").find((l) => l.startsWith("test result")); + if (testLine?.includes("0 failed")) { + const count = testLine.match(/(\d+) passed/)?.[1] ?? "?"; + await testCmd("quality:cargo test", `echo '${count} tests passed'`); + } else { + await testCmd("quality:cargo test", "exit 1", 0); + } +} + +// ══════════════════════════════════════════════════════════════ +// Phase 3: Rust Built-in Commands +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(3)) { + heading(3, "Rust Built-in Commands"); + + // Git + await testCmd("git:status", `cd /tmp/test-git && ${RTK} git status`); + await testCmd("git:log", `cd /tmp/test-git && ${RTK} git log -5`); + await testCmd("git:log --oneline", `cd /tmp/test-git && ${RTK} git log --oneline -10`); + await testCmd("git:diff", `cd /tmp/test-git && ${RTK} git diff`, "any"); + await testCmd("git:branch", `cd /tmp/test-git && ${RTK} git branch`); + await testCmd("git:add --dry-run", `cd /tmp/test-git && ${RTK} git add --dry-run .`, "any"); + + // Files + await testCmd("files:ls", `${RTK} ls /home/ubuntu/rtk`); + await testCmd("files:ls src/", `${RTK} ls /home/ubuntu/rtk/src/`); + await testCmd("files:ls -R", `${RTK} ls -R /home/ubuntu/rtk/src/`); + await testCmd("files:read", `${RTK} read /home/ubuntu/rtk/src/main.rs`); + await testCmd("files:read aggressive", `${RTK} read /home/ubuntu/rtk/src/main.rs -l aggressive`); + await testCmd("files:smart", `${RTK} smart /home/ubuntu/rtk/src/main.rs`); + await testCmd("files:find *.rs", `${RTK} find '*.rs' 
/home/ubuntu/rtk/src/`); + await testCmd("files:wc", `${RTK} wc /home/ubuntu/rtk/src/main.rs`); + await testCmd("files:diff", `${RTK} diff /home/ubuntu/rtk/src/main.rs /home/ubuntu/rtk/src/utils.rs`); + + // Search + await testCmd("search:grep", `${RTK} grep 'fn main' /home/ubuntu/rtk/src/`); + + // Data + await testCmd("data:json", `${RTK} json /tmp/test-node/package.json`); + await testCmd("data:deps", `cd /home/ubuntu/rtk && ${RTK} deps`); + await testCmd("data:env", `${RTK} env`); + + // Runners + await testCmd("runner:summary", `${RTK} summary 'echo hello world'`); + // NOTE: rtk err swallows exit code (known bug) — test output only, not exit code + await testCmd("runner:err", `${RTK} err false`, "any"); + await testCmd("runner:test", `${RTK} test 'echo ok'`, "any"); + + // Logs + await testCmd("log:large", `${RTK} log /tmp/large.log`); + + // Network + await testCmd("net:curl", `${RTK} curl https://httpbin.org/get`, "any"); + + // GitHub + await testCmd("gh:pr list", `cd /home/ubuntu/rtk && ${RTK} gh pr list`, "any"); + + // Cargo (Rust test project) + await testCmd("cargo:build", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo build`, "any"); + await testCmd("cargo:test", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo test`, "any"); + await testCmd("cargo:clippy", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo clippy`, "any"); + + // Python + await testCmd("python:pytest", `cd /tmp/test-python && ${RTK} pytest`, "any"); + await testCmd("python:ruff check", `cd /tmp/test-python && ${RTK} ruff check .`, "any"); + await testCmd("python:mypy", `cd /tmp/test-python && ${RTK} mypy .`, "any"); + await testCmd("python:pip list", `${RTK} pip list`); + + // Go + await testCmd("go:test", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go test ./...`, "any"); + await testCmd("go:build", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go build .`, "any"); + 
await testCmd("go:vet", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go vet ./...`, "any"); + await testCmd("go:golangci-lint", `export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin && cd /tmp/test-go && ${RTK} golangci-lint run`, "any"); + + // TypeScript + await testCmd("ts:tsc", `cd /tmp/test-node && ${RTK} tsc --noEmit`, "any"); + + // Linters + await testCmd("lint:eslint", `cd /tmp/test-node && ${RTK} lint 'eslint src/'`, "any"); + await testCmd("lint:prettier", `cd /tmp/test-node && ${RTK} prettier --check src/`, "any"); + + // Docker + await testCmd("docker:ps", `${RTK} docker ps`, "any"); + await testCmd("docker:images", `${RTK} docker images`, "any"); + + // Kubernetes + await testCmd("k8s:pods", `${RTK} kubectl pods`, "any"); + + // .NET + await testCmd("dotnet:build", `export DOTNET_ROOT=/usr/local/share/dotnet && export PATH=$PATH:$DOTNET_ROOT && cd /tmp/test-dotnet/TestApp 2>/dev/null && ${RTK} dotnet build || echo 'dotnet skip'`, "any"); + + // Meta + await testCmd("meta:gain", `${RTK} gain`); + await testCmd("meta:gain --history", `${RTK} gain --history`); + await testCmd("meta:proxy", `${RTK} proxy echo 'proxy test'`); + await testCmd("meta:verify", `${RTK} verify`, "any"); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 4: TOML Filter Commands +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(4)) { + heading(4, "TOML Filter Commands"); + + // System + await testCmd("toml:df", `${RTK} df -h`); + await testCmd("toml:du", `${RTK} du -sh /tmp`, "any"); + await testCmd("toml:ps", `${RTK} ps aux`); + await testCmd("toml:ping", `${RTK} ping -c 2 127.0.0.1`); + + // Build tools + await testCmd("toml:make", `cd /tmp && ${RTK} make -f Makefile`, "any"); + await testCmd("toml:rsync", `${RTK} rsync --version`); + + // Linters + await testCmd("toml:shellcheck", `${RTK} shellcheck /tmp/test.sh`, "any"); + await testCmd("toml:hadolint", `${RTK} hadolint /tmp/Dockerfile.bad`, 
"any"); + await testCmd("toml:yamllint", `${RTK} yamllint /tmp/test.yaml`, "any"); + await testCmd("toml:markdownlint", `${RTK} markdownlint /tmp/test.md`, "any"); + + // Cloud/Infra + await testCmd("toml:terraform", `${RTK} terraform --version`, "any"); + await testCmd("toml:helm", `${RTK} helm version`, "any"); + await testCmd("toml:ansible", `${RTK} ansible-playbook --version`, "any"); + + // Mocked tools + await testCmd("toml:gcloud", `${RTK} gcloud version`); + await testCmd("toml:shopify", `${RTK} shopify theme check`, "any"); + await testCmd("toml:pio", `${RTK} pio run`, "any"); + await testCmd("toml:quarto", `${RTK} quarto render`, "any"); + await testCmd("toml:sops", `${RTK} sops --version`); + await testCmd("toml:swift", `${RTK} swift build`, "any"); + await testCmd("toml:kubectl", `${RTK} kubectl version --client`, "any"); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 5: Hook Rewrite Engine +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(5)) { + heading(5, "Hook Rewrite Engine"); + + // Basic rewrites + await testRewrite("git status", "rtk git status"); + await testRewrite("git log --oneline -10", "rtk git log --oneline -10"); + await testRewrite("cargo test", "rtk cargo test"); + await testRewrite("cargo build --release", "rtk cargo build --release"); + await testRewrite("docker ps", "rtk docker ps"); + // NOTE: rtk rewrites "kubectl get pods" to "rtk kubectl get pods" (preserves get) + await testRewrite("kubectl get pods", "rtk kubectl get pods"); + await testRewrite("ruff check", "rtk ruff check"); + await testRewrite("pytest", "rtk pytest"); + await testRewrite("go test", "rtk go test"); + await testRewrite("pnpm list", "rtk pnpm list"); + await testRewrite("gh pr list", "rtk gh pr list"); + await testRewrite("df -h", "rtk df -h"); + await testRewrite("ps aux", "rtk ps aux"); + + // Compound + await testRewrite("cargo test && git status", "rtk cargo test && rtk git status"); + // 
NOTE: shell strips single quotes in vmExec, so 'msg' becomes msg + await testRewrite("git add . && git commit -m msg", "rtk git add . && rtk git commit -m msg"); + + // No rewrite (shell builtins) — rtk rewrite returns empty string + exit 1 + // We test via testCmd since testRewrite expects non-empty output + await testCmd("rewrite:cd (no rewrite)", `${RTK} rewrite 'cd /tmp'`, 1); + await testCmd("rewrite:export (no rewrite)", `${RTK} rewrite 'export FOO=bar'`, 1); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 6: Exit Code Preservation +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(6)) { + heading(6, "Exit Code Preservation"); + + // Success + await testCmd("exit:git status=0", `cd /tmp/test-git && ${RTK} git status`, 0); + await testCmd("exit:ls=0", `${RTK} ls /tmp`, 0); + await testCmd("exit:gain=0", `${RTK} gain`, 0); + + // Failures + // rg returns exit 1 (no match) or 2 (error) — accept both + await testCmd("exit:grep NOTFOUND", `${RTK} grep NOTFOUND_XYZ_123 /tmp`, "any"); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 7: Token Savings +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(7)) { + heading(7, "Token Savings"); + + await testSavings( + "savings:git log", + "cd /tmp/test-git && git log -20", + `cd /tmp/test-git && ${RTK} git log -20`, + 60 + ); + await testSavings( + "savings:ls", + "ls -la /home/ubuntu/rtk/src/", + `${RTK} ls /home/ubuntu/rtk/src/`, + 60 + ); + await testSavings( + "savings:log dedup", + "cat /tmp/large.log", + `${RTK} log /tmp/large.log`, + 80 + ); + await testSavings( + "savings:read aggressive", + "cat /home/ubuntu/rtk/src/main.rs", + `${RTK} read /home/ubuntu/rtk/src/main.rs -l aggressive`, + 50 + ); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 8: Pipe Compatibility +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(8)) { 
+ heading(8, "Pipe Compatibility"); + + await testCmd("pipe:git status|wc", `cd /tmp/test-git && ${RTK} git status | wc -l`); + await testCmd("pipe:ls|wc", `${RTK} ls /home/ubuntu/rtk/src/ | wc -l`); + await testCmd("pipe:grep|head", `${RTK} grep 'fn' /home/ubuntu/rtk/src/ | head -5`); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 9: Edge Cases +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(9)) { + heading(9, "Edge Cases"); + + await testCmd("edge:summary true", `${RTK} summary 'true'`, "any"); + await testCmd("edge:grep NOTFOUND", `${RTK} grep NOTFOUND_XYZ /home/ubuntu/rtk/src/`, 1); + await testCmd("edge:unicode", `echo 'hello world' > /tmp/uni.txt && ${RTK} grep 'hello' /tmp`, "any"); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 10: Performance (skip with --quick) +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(10) && !quick) { + heading(10, "Performance"); + + // hyperfine + const { exitCode: hfExist } = await vmExec("command -v hyperfine"); + if (hfExist === 0) { + const { stdout: hfOut } = await vmExec( + `cd /tmp/test-git && hyperfine --warmup 3 --min-runs 5 '${RTK} git status' 'git status' --export-json /dev/stdout 2>/dev/null` + ); + try { + const hf = JSON.parse(hfOut); + const rtkMean = (hf.results?.[0]?.mean * 1000).toFixed(1); + const rawMean = (hf.results?.[1]?.mean * 1000).toFixed(1); + console.log(` Startup: rtk=${rtkMean}ms raw=${rawMean}ms`); + } catch { + console.log(" hyperfine output parse failed"); + } + } else { + skipTest("perf:hyperfine", "not installed"); + } + + // Memory + const { stdout: memOut } = await vmExec( + `cd /tmp/test-git && /usr/bin/time -v ${RTK} git status 2>&1 | grep 'Maximum resident'` + ); + const memKb = parseInt(memOut.match(/(\d+)/)?.[1] ?? 
"0", 10); + if (memKb > 0 && memKb < 20000) { + await testCmd("perf:memory", `echo '${memKb} KB < 20MB'`); + } else if (memKb > 0) { + await testCmd("perf:memory", `echo '${memKb} KB >= 20MB' && exit 1`, 0); + } +} else if (quick && shouldRun(10)) { + skipTest("perf:hyperfine", "--quick mode"); + skipTest("perf:memory", "--quick mode"); +} + +// ══════════════════════════════════════════════════════════════ +// Phase 11: Concurrency (skip with --quick) +// ══════════════════════════════════════════════════════════════ + +if (shouldRun(11) && !quick) { + heading(11, "Concurrency"); + + await testCmd( + "concurrency:10x git status", + `cd /tmp/test-git && for i in $(seq 1 10); do ${RTK} git status >/dev/null & done; wait` + ); +} else if (quick && shouldRun(11)) { + skipTest("concurrency:10x", "--quick mode"); +} + +// ══════════════════════════════════════════════════════════════ +// Report +// ══════════════════════════════════════════════════════════════ + +const report = await saveReport( + { ...buildInfo, branch, commit }, + "/tmp/rtk-test-report.txt" +); + +console.log("\n" + report); + +const { total, passed, failed, skipped } = getCounts(); +const passRate = total > 0 ? 
Math.round((passed * 100) / total) : 0; + +if (failed === 0) { + console.log(`\n\x1b[32m READY FOR RELEASE — ${passed}/${total} (${passRate}%)\x1b[0m\n`); + process.exit(0); +} else if (failed <= 2) { + console.log(`\n\x1b[33m READY (${failed} minor issues) — ${passed}/${total} (${passRate}%)\x1b[0m\n`); + process.exit(0); +} else { + console.log(`\n\x1b[31m NOT READY — ${failed} failures — ${passed}/${total} (${passRate}%)\x1b[0m\n`); + process.exit(1); +} From d13c185aac64d14288b574df44623723a69e7b95 Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Thu, 26 Mar 2026 09:01:53 +0100 Subject: [PATCH 121/204] fix(benchmark): address review feedback from @FlorianBruniaux - Strict exit codes: cargo/python/go tests expect exact exit codes instead of "any" (catches real regressions) - Binary size limit documented: 8MB for ARM Linux VM vs 5MB x86 stripped - --phase NaN guard: error message instead of silent no-op - Verdict: 0 failures = READY, any failure = NOT READY (no more "minor issues" budget) - rtk err exit code bug tracked in #846 Signed-off-by: Patrick szymkowiak --- scripts/benchmark/lib/report.ts | 2 -- scripts/benchmark/run.ts | 51 +++++++++++++++++---------------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/scripts/benchmark/lib/report.ts b/scripts/benchmark/lib/report.ts index fa4a62526..1fb751c39 100644 --- a/scripts/benchmark/lib/report.ts +++ b/scripts/benchmark/lib/report.ts @@ -93,8 +93,6 @@ export function generateReport(buildInfo: BuildInfo): string { lines.push("======================================================"); if (failed === 0) { lines.push(" Verdict: READY FOR RELEASE"); - } else if (failed <= 2) { - lines.push(` Verdict: READY (${failed} minor issues)`); } else { lines.push(` Verdict: NOT READY (${failed} failures)`); } diff --git a/scripts/benchmark/run.ts b/scripts/benchmark/run.ts index 3c4ed7b4c..a828e3df7 100644 --- a/scripts/benchmark/run.ts +++ b/scripts/benchmark/run.ts @@ -18,9 +18,14 @@ import { 
saveReport } from "./lib/report"; const args = process.argv.slice(2); const quick = args.includes("--quick"); -const phaseOnly = args.includes("--phase") +const phaseArg = args.includes("--phase") ? parseInt(args[args.indexOf("--phase") + 1], 10) : null; +const phaseOnly = phaseArg !== null && !Number.isNaN(phaseArg) ? phaseArg : null; +if (args.includes("--phase") && phaseOnly === null) { + console.error("Error: --phase requires a number (e.g. --phase 3)"); + process.exit(1); +} const PROJECT_ROOT = new URL("../../", import.meta.url).pathname.replace(/\/$/, ""); const RTK = RTK_BIN; @@ -52,8 +57,10 @@ const commit = (await $`git -C ${PROJECT_ROOT} log --oneline -1`.text()).trim(); const buildInfo = await vmBuildRtk(PROJECT_ROOT); // Binary size check -// ARM Linux binaries are larger (~6.5MB) than x86 stripped (~4MB) -const sizeLimit = 8_388_608; // 8MB +// ARM Linux release binaries are ~6.5MB (vs ~4MB x86 stripped). +// CLAUDE.md target is <5MB for stripped x86 release builds. +// VM builds are ARM + not fully stripped, so we use a relaxed 8MB limit here. 
+const sizeLimit = 8_388_608; // 8MB (relaxed for ARM Linux VM) if (buildInfo.binarySize < sizeLimit) { console.log(` \x1b[32mPASS\x1b[0m | binary size | ${buildInfo.binarySize} bytes < 8MB`); } else { @@ -67,17 +74,14 @@ if (buildInfo.binarySize < sizeLimit) { if (shouldRun(2)) { heading(2, "Cargo Quality"); - // NOTE: fmt/clippy may fail if non-Rust files (benchmark .ts) are transferred — test "any" exit await testCmd( "quality:cargo fmt", - "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo fmt --all --check 2>&1", - "any" + "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo fmt --all --check 2>&1" ); await testCmd( "quality:cargo clippy", - "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo clippy --all-targets 2>&1", - "any" + "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo clippy --all-targets -- -D warnings 2>&1" ); const testResult = await vmExec( @@ -128,7 +132,7 @@ if (shouldRun(3)) { // Runners await testCmd("runner:summary", `${RTK} summary 'echo hello world'`); - // NOTE: rtk err swallows exit code (known bug) — test output only, not exit code + // BUG: rtk err swallows exit code — tracked in #846 await testCmd("runner:err", `${RTK} err false`, "any"); await testCmd("runner:test", `${RTK} test 'echo ok'`, "any"); @@ -141,22 +145,22 @@ if (shouldRun(3)) { // GitHub await testCmd("gh:pr list", `cd /home/ubuntu/rtk && ${RTK} gh pr list`, "any"); - // Cargo (Rust test project) - await testCmd("cargo:build", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo build`, "any"); - await testCmd("cargo:test", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo test`, "any"); - await testCmd("cargo:clippy", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo clippy`, "any"); + // Cargo (test project has intentional test failure → exit 101) + await testCmd("cargo:build", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && 
${RTK} cargo build`); + await testCmd("cargo:test", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo test`, 101); + await testCmd("cargo:clippy", `export PATH=$HOME/.cargo/bin:$PATH && cd /tmp/test-rust && ${RTK} cargo clippy`); - // Python - await testCmd("python:pytest", `cd /tmp/test-python && ${RTK} pytest`, "any"); - await testCmd("python:ruff check", `cd /tmp/test-python && ${RTK} ruff check .`, "any"); - await testCmd("python:mypy", `cd /tmp/test-python && ${RTK} mypy .`, "any"); + // Python (test project has intentional failures) + await testCmd("python:pytest", `cd /tmp/test-python && ${RTK} pytest`, 1); + await testCmd("python:ruff check", `cd /tmp/test-python && ${RTK} ruff check .`, 1); + await testCmd("python:mypy", `cd /tmp/test-python && ${RTK} mypy .`, 1); await testCmd("python:pip list", `${RTK} pip list`); - // Go - await testCmd("go:test", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go test ./...`, "any"); - await testCmd("go:build", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go build .`, "any"); - await testCmd("go:vet", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go vet ./...`, "any"); - await testCmd("go:golangci-lint", `export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin && cd /tmp/test-go && ${RTK} golangci-lint run`, "any"); + // Go (test project has intentional test failure) + await testCmd("go:test", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go test ./...`, 1); + await testCmd("go:build", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go build .`, 1); + await testCmd("go:vet", `export PATH=$PATH:/usr/local/go/bin && cd /tmp/test-go && ${RTK} go vet ./...`, 1); + await testCmd("go:golangci-lint", `export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin && cd /tmp/test-go && ${RTK} golangci-lint run`, 1); // TypeScript await testCmd("ts:tsc", `cd /tmp/test-node && ${RTK} tsc --noEmit`, "any"); @@ -400,9 +404,6 @@ const 
passRate = total > 0 ? Math.round((passed * 100) / total) : 0; if (failed === 0) { console.log(`\n\x1b[32m READY FOR RELEASE — ${passed}/${total} (${passRate}%)\x1b[0m\n`); process.exit(0); -} else if (failed <= 2) { - console.log(`\n\x1b[33m READY (${failed} minor issues) — ${passed}/${total} (${passRate}%)\x1b[0m\n`); - process.exit(0); } else { console.log(`\n\x1b[31m NOT READY — ${failed} failures — ${passed}/${total} (${passRate}%)\x1b[0m\n`); process.exit(1); From 1fbb6d935b4a0d031a7862cba312eebe1303ba9b Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Thu, 26 Mar 2026 09:35:58 +0100 Subject: [PATCH 122/204] feat(benchmark): add Swift ecosystem tests (6 commands + savings) Signed-off-by: Patrick szymkowiak --- scripts/benchmark/run.ts | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/scripts/benchmark/run.ts b/scripts/benchmark/run.ts index a828e3df7..700ad0f45 100644 --- a/scripts/benchmark/run.ts +++ b/scripts/benchmark/run.ts @@ -220,7 +220,13 @@ if (shouldRun(4)) { await testCmd("toml:pio", `${RTK} pio run`, "any"); await testCmd("toml:quarto", `${RTK} quarto render`, "any"); await testCmd("toml:sops", `${RTK} sops --version`); - await testCmd("toml:swift", `${RTK} swift build`, "any"); + // Swift ecosystem + await testCmd("toml:swift build", `${RTK} swift build`, "any"); + await testCmd("toml:swift test", `${RTK} swift test`, "any"); + await testCmd("toml:swift run", `${RTK} swift run`, "any"); + await testCmd("toml:swift package", `${RTK} swift package resolve`, "any"); + await testCmd("toml:swiftlint", `${RTK} swiftlint`, "any"); + await testCmd("toml:swiftformat", `${RTK} swiftformat`, "any"); await testCmd("toml:kubectl", `${RTK} kubectl version --client`, "any"); } @@ -306,6 +312,18 @@ if (shouldRun(7)) { `${RTK} read /home/ubuntu/rtk/src/main.rs -l aggressive`, 50 ); + await testSavings( + "savings:swift test", + "swift test", + `${RTK} swift test`, + 60 + ); + await testSavings( + "savings:swiftlint", + 
"swiftlint", + `${RTK} swiftlint`, + 20 + ); } // ══════════════════════════════════════════════════════════════ From 663471203337d7ca6fd76d763b001619c3196059 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 11:51:15 +0200 Subject: [PATCH 123/204] Update CHANGELOG.md --- CHANGELOG.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02e80021b..1c1067489 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,12 +5,6 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased] - -### Bug Fixes - -* **go:** fix double-counted failure in `go test` summary when test-level failures also trigger a package-level fail event ([#958](https://github.com/rtk-ai/rtk/issues/958)) - ## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) From 87ee81f08be5e7b1ca79513b1a91925d455f4f5c Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Sun, 12 Apr 2026 11:55:57 +0200 Subject: [PATCH 124/204] fix(benchmark): address PR review feedback - Run cargo test directly through testCmd instead of faking results - Add 60s per-test timeout on vmExec to prevent hung suite - Make report path configurable via --report flag (default: project root) - Fix shell injection in testRewrite by escaping single quotes Signed-off-by: Patrick szymkowiak --- scripts/benchmark/lib/test.ts | 3 ++- scripts/benchmark/lib/vm.ts | 27 ++++++++++++++++++--------- scripts/benchmark/run.ts | 15 ++++++--------- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/scripts/benchmark/lib/test.ts b/scripts/benchmark/lib/test.ts index 29297781a..cffe6148b 100644 --- a/scripts/benchmark/lib/test.ts +++ b/scripts/benchmark/lib/test.ts @@ -137,7 +137,8 @@ export async function testRewrite( 
input: string, expected: string ): Promise { - const { stdout } = await vmExec(`${RTK_BIN} rewrite '${input}'`); + const escaped = input.replace(/'/g, "'\\''"); + const { stdout } = await vmExec(`${RTK_BIN} rewrite '${escaped}'`); const actual = stdout.trim(); let status: TestStatus; diff --git a/scripts/benchmark/lib/vm.ts b/scripts/benchmark/lib/vm.ts index dbe09051b..fcbf3a815 100644 --- a/scripts/benchmark/lib/vm.ts +++ b/scripts/benchmark/lib/vm.ts @@ -41,20 +41,29 @@ export async function vmStart(): Promise { await $`multipass start ${VM_NAME}`; } -/** Execute a command in the VM, returns stdout */ -export async function vmExec(cmd: string): Promise<{ +/** Execute a command in the VM, returns stdout (60s timeout per test by default) */ +export async function vmExec( + cmd: string, + timeoutMs = 60_000 +): Promise<{ stdout: string; stderr: string; exitCode: number; }> { - const result = await $`multipass exec ${VM_NAME} -- bash -c ${cmd}` + const exec = $`multipass exec ${VM_NAME} -- bash -c ${cmd}` .quiet() - .nothrow(); - return { - stdout: result.stdout.toString(), - stderr: result.stderr.toString(), - exitCode: result.exitCode, - }; + .nothrow() + .then((r) => ({ + stdout: r.stdout.toString(), + stderr: r.stderr.toString(), + exitCode: r.exitCode, + })); + + const timeout = new Promise<{ stdout: string; stderr: string; exitCode: number }>((_, reject) => + setTimeout(() => reject(new Error(`vmExec timed out after ${timeoutMs}ms: ${cmd}`)), timeoutMs) + ); + + return Promise.race([exec, timeout]); } /** Transfer a file to the VM */ diff --git a/scripts/benchmark/run.ts b/scripts/benchmark/run.ts index 700ad0f45..3d964963c 100644 --- a/scripts/benchmark/run.ts +++ b/scripts/benchmark/run.ts @@ -26,6 +26,9 @@ if (args.includes("--phase") && phaseOnly === null) { console.error("Error: --phase requires a number (e.g. --phase 3)"); process.exit(1); } +const reportPath = args.includes("--report") + ? 
args[args.indexOf("--report") + 1] + : `${new URL("../../", import.meta.url).pathname.replace(/\/$/, "")}/benchmark-report.txt`; const PROJECT_ROOT = new URL("../../", import.meta.url).pathname.replace(/\/$/, ""); const RTK = RTK_BIN; @@ -84,16 +87,10 @@ if (shouldRun(2)) { "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo clippy --all-targets -- -D warnings 2>&1" ); - const testResult = await vmExec( + await testCmd( + "quality:cargo test", "export PATH=$HOME/.cargo/bin:$PATH && cd /home/ubuntu/rtk && cargo test --all 2>&1" ); - const testLine = testResult.stdout.split("\n").find((l) => l.startsWith("test result")); - if (testLine?.includes("0 failed")) { - const count = testLine.match(/(\d+) passed/)?.[1] ?? "?"; - await testCmd("quality:cargo test", `echo '${count} tests passed'`); - } else { - await testCmd("quality:cargo test", "exit 1", 0); - } } // ══════════════════════════════════════════════════════════════ @@ -411,7 +408,7 @@ if (shouldRun(11) && !quick) { const report = await saveReport( { ...buildInfo, branch, commit }, - "/tmp/rtk-test-report.txt" + reportPath ); console.log("\n" + report); From 7d99c9ed88f57be50250d8a4117fafad8aff63d5 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 12:16:37 +0200 Subject: [PATCH 125/204] Update CHANGELOG.md --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 767a7b12d..e98489a2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Bug Fixes -* **pytest:** fix `rtk pytest -q` incorrectly reporting "No tests collected" when tests ran ([#565](https://github.com/rtk-ai/rtk/issues/565)) — quiet mode summary line (no `===` wrapper) was not captured by the parser, causing `parse_summary_line("")` to return `(0, 0, 0)` and trigger the wrong message. 
Also fix false "No tests collected" when only skipped tests exist. * **diff:** correct truncation overflow count in condense_unified_diff ([#833](https://github.com/rtk-ai/rtk/pull/833)) ([5399f83](https://github.com/rtk-ai/rtk/commit/5399f83)) * **git:** replace vague truncation markers with exact counts in log and grep output ([#833](https://github.com/rtk-ai/rtk/pull/833)) ([185fb97](https://github.com/rtk-ai/rtk/commit/185fb97)) From 2e4cc4bb5226444c8c0bfc827baf0c101c3759e8 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 12:59:22 +0200 Subject: [PATCH 126/204] fix(telemetry): consent, erasure, auth, docs --- README.md | 17 ++++++++++------- docs/TELEMETRY.md | 17 +++++++++-------- src/core/telemetry_cmd.rs | 26 ++++++++++++++++++++++---- src/hooks/init.rs | 26 ++++++-------------------- 4 files changed, 47 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index 9772104c3..7c4166b9b 100644 --- a/README.md +++ b/README.md @@ -370,7 +370,7 @@ brew uninstall rtk # If installed via Homebrew ## Privacy & Telemetry -RTK collects **anonymous, aggregate usage metrics** once per day, **enabled by default**. This data helps us build a better product: identifying which commands need filters, which filters need improvement, and how much value RTK delivers. For the full list of fields, data handling, and contributor guidelines, see **[docs/TELEMETRY.md](docs/TELEMETRY.md)**. +RTK can collect **anonymous, aggregate usage metrics** once per day. Telemetry is **disabled by default** and requires **explicit opt-in consent** (GDPR Art. 6, 7) during `rtk init` or via `rtk telemetry enable`. This data helps us build a better product: identifying which commands need filters, which filters need improvement, and how much value RTK delivers. For the full list of fields, data handling, and contributor guidelines, see **[docs/TELEMETRY.md](docs/TELEMETRY.md)**. 
**What is collected and why:** @@ -391,14 +391,17 @@ All data is **aggregate counts or anonymized command names** (first 3 words, no **What is NOT collected:** source code, file paths, command arguments, secrets, environment variables, personal data, or repository contents. -**Opt-out** (any of these): +**Manage telemetry:** ```bash -# Environment variable -export RTK_TELEMETRY_DISABLED=1 +rtk telemetry status # Check current consent state +rtk telemetry enable # Give consent (interactive prompt) +rtk telemetry disable # Withdraw consent — stops all collection immediately +rtk telemetry forget # Withdraw consent + delete all local data + request server-side erasure +``` -# Or in config file (~/.config/rtk/config.toml) -[telemetry] -enabled = false +**Override via environment:** +```bash +export RTK_TELEMETRY_DISABLED=1 # Blocks telemetry regardless of consent ``` ## Star History diff --git a/docs/TELEMETRY.md b/docs/TELEMETRY.md index 12ffa071e..1bded7163 100644 --- a/docs/TELEMETRY.md +++ b/docs/TELEMETRY.md @@ -114,7 +114,7 @@ This data directly drives our roadmap. For example, if telemetry shows that 40% - Secrets, API keys, or environment variable values - Repository names or URLs - Personally identifiable information -- IP addresses (not logged server-side) +- IP addresses (not stored in telemetry pings; stored temporarily in erasure audit log for accountability, anonymized after 6 months) ## Consent @@ -134,8 +134,9 @@ export RTK_TELEMETRY_DISABLED=1 ## Retention Policy -- **Server-side**: telemetry records are retained for a maximum of **12 months**, then automatically purged. -- **Client-side**: the local SQLite database (`~/.local/share/rtk/tracking.db`) retains data for **90 days** by default (configurable via `tracking.history_days` in `config.toml`). +- **Server-side**: telemetry records are retained for a maximum of **12 months**, then automatically purged (periodic task every 24 hours). 
+- **Server-side (erasure log)**: IP addresses in the erasure audit log are **anonymized after 6 months** (GDPR — IP is personal data). +- **Client-side**: the local SQLite database (`~/.local/share/rtk/tracking.db`) retains data for **90 days** by default (configurable via `tracking.history_days` in `config.toml`). Deleted entirely by `rtk telemetry forget`. ## Your Rights (GDPR) @@ -150,8 +151,8 @@ Under the EU General Data Protection Regulation, you have the right to: ## Erasure Procedure -1. Run `rtk telemetry forget` — this disables telemetry, deletes your device salt and ping marker, and sends an erasure request to the server. -2. If the server is unreachable, the CLI prints fallback instructions with your device hash and the contact email. +1. Run `rtk telemetry forget` — this disables telemetry, deletes your device salt, ping marker, and local tracking database (`history.db`), then sends an erasure request to the server. +2. If the server is unreachable, the CLI prints your full device hash and fallback instructions to email contact@rtk-ai.app for manual erasure. 3. You can also email contact@rtk-ai.app directly to request manual erasure. ## Data Handling @@ -165,9 +166,9 @@ Under the EU General Data Protection Regulation, you have the right to: ### Server-side Requirements The telemetry server must implement: -- `POST /erasure` endpoint accepting `{"device_hash": "...", "action": "erasure"}` -- Automatic purge of records older than 12 months -- Audit log for erasure requests (GDPR Art. 17(2) accountability) +- `POST /erasure` endpoint accepting `{"device_hash": "...", "action": "erasure"}`, authenticated via `X-RTK-Token` +- Automatic periodic purge of telemetry records older than 12 months +- Audit log for erasure requests (GDPR Art. 
17(2) accountability) with IP anonymization after 6 months ## For contributors diff --git a/src/core/telemetry_cmd.rs b/src/core/telemetry_cmd.rs index bad574f5d..f129f4ac9 100644 --- a/src/core/telemetry_cmd.rs +++ b/src/core/telemetry_cmd.rs @@ -112,6 +112,7 @@ fn run_forget() -> Result<()> { let salt_path = super::telemetry::salt_file_path(); let marker_path = super::telemetry::telemetry_marker_path(); + // Compute device hash before deleting the salt let device_hash = if salt_path.exists() { Some(super::telemetry::generate_device_hash()) } else { @@ -127,6 +128,19 @@ fn run_forget() -> Result<()> { let _ = std::fs::remove_file(&marker_path); } + // Purge local tracking database (GDPR Art. 17 — right to erasure applies to local data too) + let db_path = dirs::data_local_dir() + .unwrap_or_else(|| std::path::PathBuf::from(".")) + .join(super::constants::RTK_DATA_DIR) + .join(super::constants::HISTORY_DB); + if db_path.exists() { + match std::fs::remove_file(&db_path) { + Ok(()) => println!("Local tracking database deleted: {}", db_path.display()), + Err(e) => eprintln!("rtk: could not delete {}: {}", db_path.display(), e), + } + } + + // Send server-side erasure request if let Some(hash) = device_hash { match send_erasure_request(&hash) { Ok(()) => { @@ -135,7 +149,7 @@ fn run_forget() -> Result<()> { Err(e) => { eprintln!("rtk: could not reach server: {}", e); eprintln!(" To complete erasure, email contact@rtk-ai.app"); - eprintln!(" with your device hash: {}...{}", &hash[..8], &hash[56..]); + eprintln!(" with your device hash: {}", hash); } } } @@ -156,9 +170,13 @@ fn send_erasure_request(device_hash: &str) -> Result<(), Box Result<()> { let config = crate::core::config::Config::load().unwrap_or_default(); match config.telemetry.consent_given { Some(true) => return Ok(()), - Some(false) => { - let should_reask = config - .telemetry - .consent_date - .as_deref() - .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok()) - .map(|date| { - (chrono::Utc::now() - 
date.with_timezone(&chrono::Utc)).num_days() >= 14 - }) - .unwrap_or(false); - if !should_reask { - return Ok(()); - } - } + Some(false) => return Ok(()), None => {} } + if !io::stdin().is_terminal() { + save_telemetry_consent(false)?; + return Ok(()); + } + eprintln!(); eprintln!("--- Telemetry ---"); eprintln!("RTK collects anonymous usage metrics once per day to improve filters."); @@ -484,12 +476,6 @@ fn prompt_telemetry_consent() -> Result<()> { eprintln!(); eprint!("Enable anonymous telemetry? [y/N] "); - if !io::stdin().is_terminal() { - eprintln!("(non-interactive mode, defaulting to N)"); - save_telemetry_consent(false)?; - return Ok(()); - } - let stdin = io::stdin(); let mut line = String::new(); stdin From b6bc98a064d37dd6545cff8569dde64c0cf8c4f5 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 14:04:00 +0200 Subject: [PATCH 127/204] Update CHANGELOG.md --- CHANGELOG.md | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07af09d07..1c1067489 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,45 +5,6 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [0.35.0](https://github.com/rtk-ai/rtk/compare/v0.34.3...v0.35.0) (2026-04-06) - - -### Features - -* **aws:** expand CLI filters from 8 to 25 subcommands ([402c48e](https://github.com/rtk-ai/rtk/commit/402c48e66988e638a5b4f4dd193238fc1d0fe18f)) - - -### Bug Fixes - -* **cmd:** read/cat multiple file and consistent behavior ([3f58018](https://github.com/rtk-ai/rtk/commit/3f58018f4af1d7206457929cf80bb4534203c3ee)) -* **docs:** clean some docs + disclaimer ([deda44f](https://github.com/rtk-ai/rtk/commit/deda44f73607981f3d27ecc6341ce927aab34d37)) -* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([8465ca9](https://github.com/rtk-ai/rtk/commit/8465ca953fa9d70dcc971a941c19465d456eb7d4)) -* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([e1f2845](https://github.com/rtk-ai/rtk/commit/e1f2845df06a8d8b8325945dc4940ec5f530e4cc)) -* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([eefeae4](https://github.com/rtk-ai/rtk/commit/eefeae45656ff2607c3f519c8eae235e3f0fe411)) -* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([6cee6c6](https://github.com/rtk-ai/rtk/commit/6cee6c60b80f914ed9505e3925d85cadec43ab97)) -* **git:** preserve full diff hunk headers ([62f4452](https://github.com/rtk-ai/rtk/commit/62f445227679f3df293fe35e9b18cc5ab39d7963)) -* **git:** preserve full diff hunk headers ([09b3ff9](https://github.com/rtk-ai/rtk/commit/09b3ff9424e055f5fe25e535e5b60e077f8344f9)) -* **go:** avoid false build errors from download logs ([9c1cf2f](https://github.com/rtk-ai/rtk/commit/9c1cf2f403534fa7874638b1b983c2d7f918a185)) -* **go:** avoid false build errors from download logs ([d44fd3e](https://github.com/rtk-ai/rtk/commit/d44fd3e034208e3bcd59c2c46f7720eec4f10c98)) -* **go:** cover more build failure shapes 
([2425ad6](https://github.com/rtk-ai/rtk/commit/2425ad68e5386d19e5ec9ff1ca151a6d2c9a56d3)) -* **go:** preserve failing test location context ([1481bc5](https://github.com/rtk-ai/rtk/commit/1481bc590924031456a6022510275c29c09e330e)) -* **go:** preserve failing test location context ([374fe64](https://github.com/rtk-ai/rtk/commit/374fe64cfbedcd676733973e81a63a6dfecbb1b7)) -* **go:** restore build error coverage ([1177c9c](https://github.com/rtk-ai/rtk/commit/1177c9c873ac63b6c0bcc9e1b664a705baa0ad7a)) -* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([7217562](https://github.com/rtk-ai/rtk/commit/72175623551f40b581b4a7f6ed966c1e4a9c7358)) -* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([09979cf](https://github.com/rtk-ai/rtk/commit/09979cf29701a1b775bcac761d24ec0e055d1bec)) -* **hook_check:** detect missing integrations ([9cf9ccc](https://github.com/rtk-ai/rtk/commit/9cf9ccc1ac39f8bba37e932c7d318a3aa7a34ae9)) -* **init:** remove opt-out instruction from telemetry message ([7571c8e](https://github.com/rtk-ai/rtk/commit/7571c8e101c41ee64c51e2bd64697f85f9142423)) -* **init:** remove telemetry info lines from init output ([7dbef2c](https://github.com/rtk-ai/rtk/commit/7dbef2ce00824d26f2057e4c3c76e429e2e23088)) -* **main:** kill zombie processes + path for rtk md ([d16fc6d](https://github.com/rtk-ai/rtk/commit/d16fc6dacbfec912c21522939b15b7bbd9719487)) -* **main:** kill zombie processes + path for rtk md + missing intergrations ([a919335](https://github.com/rtk-ai/rtk/commit/a919335519ed4a5259a212e56407cb312aa99bac)) -* **merge:** changelog conflicts ([d92c5d2](https://github.com/rtk-ai/rtk/commit/d92c5d264a49483c8d6079e04d946a79bc990a74)) -* **proxy:** kill child process on SIGINT/SIGTERM to prevent orphans ([d813919](https://github.com/rtk-ai/rtk/commit/d813919a24546e044e7844fc7ed05fef4ec24033)) -* **proxy:** kill child process on SIGINT/SIGTERM to 
prevent orphans ([3318510](https://github.com/rtk-ai/rtk/commit/33185101fc122d0c11a25a4e02ac9f3a7dc7e3bb)) -* **review:** address ChildGuard disarm, stdin dedup, hook masking ([d85fe33](https://github.com/rtk-ai/rtk/commit/d85fe3384b87c16fafd25ec7bcadbff6e69f3f1f)) -* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([158c745](https://github.com/rtk-ai/rtk/commit/158c74527f6591d372e40a78cd604d73a20649a9)) -* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([41a6c6b](https://github.com/rtk-ai/rtk/commit/41a6c6bf6da78a4754794fdc6a1469df2e327920)) -* **tracking:** use std::env::temp_dir() for compatibility (instead of unix tmp) ([e918661](https://github.com/rtk-ai/rtk/commit/e918661440d7b50321f0535032f52c5e87aaf3cb)) - ## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) From 8adea26aeeacadd5e50037387c74de14c78b4fcf Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 14:04:26 +0200 Subject: [PATCH 128/204] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index a766e9228..cb52a76bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.35.0" +version = "0.34.3" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From a4ec3856752cd8e10a816ecf2baba3b0e6996ee5 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 14:04:42 +0200 Subject: [PATCH 129/204] Update .release-please-manifest.json --- .release-please-manifest.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3a39fd8cf..b9091c583 100644 --- 
a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.35.0" + ".": "0.34.3" } From 7821e9872fd1f1ae9b40eb8a4458049869acc36b Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 15:05:58 +0200 Subject: [PATCH 130/204] fix(telemetry): non-terminal consent, single config load - P0: non-terminal `rtk init` no longer writes consent_given=false leaves consent as None so next interactive run prompts normally - P1: maybe_ping() loads Config once instead of twice per command --- src/core/telemetry.rs | 10 ++++++++-- src/hooks/init.rs | 1 - 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/core/telemetry.rs b/src/core/telemetry.rs index 62865dc0b..3b37e9a40 100644 --- a/src/core/telemetry.rs +++ b/src/core/telemetry.rs @@ -27,14 +27,20 @@ pub fn maybe_ping() { return; } + // Load config once (avoid double disk read) + let cfg = match config::Config::load() { + Ok(c) => c, + Err(_) => return, + }; + // RGPD: require explicit consent before any telemetry - match config::telemetry_consent() { + match cfg.telemetry.consent_given { Some(true) => {} Some(false) | None => return, } // Check opt-out: config.toml - if let Some(false) = config::telemetry_enabled() { + if !cfg.telemetry.enabled { return; } diff --git a/src/hooks/init.rs b/src/hooks/init.rs index 5c2ca8e7e..7a2520853 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -459,7 +459,6 @@ fn prompt_telemetry_consent() -> Result<()> { } if !io::stdin().is_terminal() { - save_telemetry_consent(false)?; return Ok(()); } From 81560812610686fa5ca3633c2bf0b79c05eaa7d9 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 15:28:25 +0200 Subject: [PATCH 131/204] fix(telemetry): clean code --- Cargo.lock | 12 ------------ Cargo.toml | 1 - src/core/config.rs | 8 -------- src/core/telemetry_cmd.rs | 4 ++-- 4 files changed, 2 insertions(+), 23 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index e1309d2eb..d3fb7edb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -491,17 +491,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hostname" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" -dependencies = [ - "cfg-if", - "libc", - "windows-link", -] - [[package]] name = "iana-time-zone" version = "0.1.65" @@ -913,7 +902,6 @@ dependencies = [ "dirs", "flate2", "getrandom 0.4.2", - "hostname", "ignore", "lazy_static", "libc", diff --git a/Cargo.toml b/Cargo.toml index cb52a76bb..81cc9c1df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,6 @@ chrono = "0.4" tempfile = "3" sha2 = "0.10" ureq = "2" -hostname = "0.4" getrandom = "0.4" flate2 = "1.0" quick-xml = "0.37" diff --git a/src/core/config.rs b/src/core/config.rs index 88ae9a173..8d3da7275 100644 --- a/src/core/config.rs +++ b/src/core/config.rs @@ -128,14 +128,6 @@ pub fn limits() -> LimitsConfig { Config::load().map(|c| c.limits).unwrap_or_default() } -pub fn telemetry_enabled() -> Option { - Config::load().ok().map(|c| c.telemetry.enabled) -} - -pub fn telemetry_consent() -> Option { - Config::load().ok().and_then(|c| c.telemetry.consent_given) -} - impl Config { pub fn load() -> Result { let path = get_config_path()?; diff --git a/src/core/telemetry_cmd.rs b/src/core/telemetry_cmd.rs index f129f4ac9..70ba30db7 100644 --- a/src/core/telemetry_cmd.rs +++ b/src/core/telemetry_cmd.rs @@ -158,11 +158,11 @@ fn run_forget() -> Result<()> { Ok(()) } -fn send_erasure_request(device_hash: &str) -> Result<(), Box> { +fn send_erasure_request(device_hash: &str) -> Result<()> { let url = option_env!("RTK_TELEMETRY_URL"); let url = match url { Some(u) => format!("{}/erasure", u), - None => return Err("no telemetry endpoint 
configured".into()), + None => anyhow::bail!("no telemetry endpoint configured"), }; let payload = serde_json::json!({ From bab3a53f24f95a4a5821b23712f0b7f2ce3e0445 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Fri, 6 Mar 2026 03:47:22 +0100 Subject: [PATCH 132/204] feat(discover): handle more npm/npx/pnpm/pnpx patterns - handle npm exec|run and their aliases - handle pnpm exec|run and their aliases like npm - handle pnpx and its alias like npx - handle all forms of js script/package execution Signed-off-by: Nicolas Le Cam --- src/discover/registry.rs | 465 ++++++++++++++++++++++++++++++++++----- src/discover/rules.rs | 199 +++++++++++++++-- 2 files changed, 586 insertions(+), 78 deletions(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 401cba9ba..5485b14e6 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -1029,19 +1029,32 @@ mod tests { } #[test] - fn test_rewrite_npx_tsc() { - assert_eq!( - rewrite_command("npx tsc --noEmit", &[]), - Some("rtk tsc --noEmit".into()) - ); - } - - #[test] - fn test_rewrite_pnpm_tsc() { - assert_eq!( - rewrite_command("pnpm tsc --noEmit", &[]), - Some("rtk tsc --noEmit".into()) - ); + fn test_rewrite_tsc() { + let commands = vec![ + "npm exec tsc", + "npm rum tsc", + "npm run tsc", + "npm run-script tsc", + "npm urn tsc", + "npm x tsc", + "pnpm dlx tsc", + "pnpm exec tsc", + "pnpm run tsc", + "pnpm run-script tsc", + "npm tsc", + "npx tsc", + "pnpm tsc", + "pnpx tsc", + "tsc", + ]; + for command in commands { + assert_eq!( + rewrite_command(&format!("{command} --noEmit"), &[]), + Some("rtk tsc --noEmit".into()), + "Failed for command: {}", + command + ); + } } #[test] @@ -1081,19 +1094,61 @@ mod tests { } #[test] - fn test_rewrite_npx_playwright() { - assert_eq!( - rewrite_command("npx playwright test", &[]), - Some("rtk playwright test".into()) - ); + fn test_rewrite_playwright() { + let commands = vec![ + "npm exec playwright", + "npm rum playwright", + "npm run playwright", + 
"npm run-script playwright", + "npm urn playwright", + "npm x playwright", + "pnpm dlx playwright", + "pnpm exec playwright", + "pnpm run playwright", + "pnpm run-script playwright", + "npm playwright", + "npx playwright", + "pnpm playwright", + "pnpx playwright", + "playwright", + ]; + for command in commands { + assert_eq!( + rewrite_command(&format!("{command} test"), &[]), + Some("rtk playwright test".into()), + "Failed for command: {}", + command + ); + } } #[test] fn test_rewrite_next_build() { - assert_eq!( - rewrite_command("next build --turbo", &[]), - Some("rtk next --turbo".into()) - ); + let commands = vec![ + "npm exec next build", + "npm rum next build", + "npm run next build", + "npm run-script next build", + "npm urn next build", + "npm x next build", + "pnpm dlx next build", + "pnpm exec next build", + "pnpm run next build", + "pnpm run-script next build", + "npm next build", + "npx next build", + "pnpm next build", + "pnpx next build", + "next build", + ]; + for command in commands { + assert_eq!( + rewrite_command(&format!("{command} --turbo"), &[]), + Some("rtk next --turbo".into()), + "Failed for command: {}", + command + ); + } } #[test] @@ -1918,67 +1973,359 @@ mod tests { // --- JS/TS tooling --- #[test] - fn test_classify_vitest() { - assert!(matches!( - classify_command("vitest run"), - Classification::Supported { - rtk_equivalent: "rtk vitest", - .. 
- } - )); + fn test_classify_lint() { + let commands = vec![ + "npm exec biome", + "npm exec eslint", + "npm rum biome", + "npm rum eslint", + "npm rum lint", + "npm run biome", + "npm run eslint", + "npm run lint", + "npm run-script biome", + "npm run-script eslint", + "npm run-script lint", + "npm urn biome", + "npm urn eslint", + "npm urn lint", + "npm x biome", + "npm x eslint", + "pnpm dlx biome", + "pnpm dlx eslint", + "pnpm exec biome", + "pnpm exec eslint", + "pnpm run biome", + "pnpm run eslint", + "pnpm run lint", + "pnpm run-script biome", + "pnpm run-script eslint", + "pnpm run-script lint", + "npm biome", + "npm eslint", + "npm lint", + "npx biome", + "npx eslint", + "npx lint", + "pnpm biome", + "pnpm eslint", + "pnpm lint", + "pnpx biome", + "pnpx eslint", + "pnpx lint", + "biome", + "eslint", + "lint", + ]; + for command in commands { + assert!( + matches!( + classify_command(command), + Classification::Supported { + rtk_equivalent: "rtk lint", + .. + } + ), + "Failed for command: {}", + command + ); + } } #[test] - fn test_rewrite_vitest() { - assert_eq!( - rewrite_command("vitest run", &[]), - Some("rtk vitest run".into()) - ); + fn test_rewrite_lint() { + let commands = vec![ + "npm exec biome", + "npm exec eslint", + "npm rum biome", + "npm rum eslint", + "npm rum lint", + "npm run biome", + "npm run eslint", + "npm run lint", + "npm run-script biome", + "npm run-script eslint", + "npm run-script lint", + "npm urn biome", + "npm urn eslint", + "npm urn lint", + "npm x biome", + "npm x eslint", + "pnpm dlx biome", + "pnpm dlx eslint", + "pnpm exec biome", + "pnpm exec eslint", + "pnpm run biome", + "pnpm run eslint", + "pnpm run lint", + "pnpm run-script biome", + "pnpm run-script eslint", + "pnpm run-script lint", + "npm biome", + "npm eslint", + "npm lint", + "npx biome", + "npx eslint", + "npx lint", + "pnpm biome", + "pnpm eslint", + "pnpm lint", + "pnpx biome", + "pnpx eslint", + "pnpx lint", + "biome", + "eslint", + "lint", + ]; + for 
command in commands { + assert_eq!( + rewrite_command(command, &[]), + Some("rtk lint".into()), + "Failed for command: {}", + command + ); + } } #[test] - fn test_rewrite_pnpm_vitest() { - assert_eq!( - rewrite_command("pnpm vitest run", &[]), - Some("rtk vitest run".into()) - ); + fn test_classify_vitest() { + let commands = vec![ + "npm exec jest", + "npm exec vitest", + "npm rum jest", + "npm rum vitest", + "npm run jest", + "npm run vitest", + "npm run-script jest", + "npm run-script vitest", + "npm urn jest", + "npm urn vitest", + "npm x jest", + "npm x vitest", + "pnpm dlx jest", + "pnpm dlx vitest", + "pnpm exec jest", + "pnpm exec vitest", + "pnpm run jest", + "pnpm run vitest", + "pnpm run-script jest", + "pnpm run-script vitest", + "npm jest", + "npm t", + "npm test", + "npm tst", + "npm vitest", + "npx jest", + "npx vitest", + "pnpm jest", + "pnpm t", + "pnpm test", + "pnpm tst", + "pnpm vitest", + "pnpx jest", + "pnpx vitest", + "jest", + "vitest", + ]; + for command in commands { + assert!( + matches!( + classify_command(command), + Classification::Supported { + rtk_equivalent: "rtk vitest", + .. 
+ } + ), + "Failed for command: {}", + command + ); + } + } + + #[test] + fn test_rewrite_vitest() { + let commands = vec![ + "npm exec jest", + "npm exec vitest", + "npm rum jest", + "npm rum vitest", + "npm run jest", + "npm run vitest", + "npm run-script jest", + "npm run-script vitest", + "npm urn jest", + "npm urn vitest", + "npm x jest", + "npm x vitest", + "pnpm dlx jest", + "pnpm dlx vitest", + "pnpm exec jest", + "pnpm exec vitest", + "pnpm run jest", + "pnpm run vitest", + "pnpm run-script jest", + "pnpm run-script vitest", + "npm jest", + "npm t", + "npm test", + "npm tst", + "npm vitest", + "npx jest", + "npx vitest", + "pnpm jest", + "pnpm t", + "pnpm test", + "pnpm tst", + "pnpm vitest", + "pnpx jest", + "pnpx vitest", + "jest", + "vitest", + ]; + for command in commands { + assert_eq!( + rewrite_command(&format!("{command} run"), &[]), + Some("rtk vitest run".into()), + "Failed for command: {}", + command + ); + } } #[test] fn test_classify_prisma() { - assert!(matches!( - classify_command("npx prisma migrate dev"), - Classification::Supported { - rtk_equivalent: "rtk prisma", - .. - } - )); + let commands = vec![ + "npm exec prisma", + "npm rum prisma", + "npm run prisma", + "npm run-script prisma", + "npm urn prisma", + "npm x prisma", + "pnpm dlx prisma", + "pnpm exec prisma", + "pnpm run prisma", + "pnpm run-script prisma", + "npm prisma", + "npx prisma", + "pnpm prisma", + "pnpx prisma", + "prisma", + ]; + for command in commands { + assert!( + matches!( + classify_command(format!("{command} migrate dev").as_str()), + Classification::Supported { + rtk_equivalent: "rtk prisma", + .. 
+ } + ), + "Failed for command: {}", + command + ); + } } #[test] fn test_rewrite_prisma() { - assert_eq!( - rewrite_command("npx prisma migrate dev", &[]), - Some("rtk prisma migrate dev".into()) - ); + let commands = vec![ + "npm exec prisma", + "npm rum prisma", + "npm run prisma", + "npm run-script prisma", + "npm urn prisma", + "npm x prisma", + "pnpm dlx prisma", + "pnpm exec prisma", + "pnpm run prisma", + "pnpm run-script prisma", + "npm prisma", + "npx prisma", + "pnpm prisma", + "pnpx prisma", + "prisma", + ]; + for command in commands { + assert_eq!( + rewrite_command(format!("{command} migrate dev").as_str(), &[]), + Some("rtk prisma migrate dev".into()), + "Failed for command: {}", + command + ); + } } #[test] fn test_rewrite_prettier() { - assert_eq!( - rewrite_command("npx prettier --check src/", &[]), - Some("rtk prettier --check src/".into()) - ); + let commands = vec![ + "npm exec prettier", + "npm rum prettier", + "npm run prettier", + "npm run-script prettier", + "npm urn prettier", + "npm x prettier", + "pnpm dlx prettier", + "pnpm exec prettier", + "pnpm run prettier", + "pnpm run-script prettier", + "npm prettier", + "npx prettier", + "pnpm prettier", + "pnpx prettier", + "prettier", + ]; + for command in commands { + assert_eq!( + rewrite_command(format!("{command} --check src/").as_str(), &[]), + Some("rtk prettier --check src/".into()), + "Failed for command: {}", + command + ); + } } #[test] - fn test_rewrite_pnpm_list() { + fn test_rewrite_pnpm_command() { + let commands = vec![ + "exec", + "i", + "install", + "list", + "ls", + "outdated", + "run", + "run-script", + ]; + for command in commands { + assert_eq!( + rewrite_command(format!("pnpm {command}").as_str(), &[]), + Some(format!("rtk pnpm {command}")), + "Failed for command: pnpm {}", + command + ); + } + } + + #[test] + fn test_rewrite_pnpx() { + let commands = vec!["pnpm dlx", "pnpx"]; + for command in commands { + assert_eq!( + rewrite_command(format!("{command} svgo").as_str(), 
&[]), + Some(format!("rtk pnpx svgo")), + "Failed for command: {}", + command + ); + } + } + + #[test] + fn test_rewrite_npx() { assert_eq!( - rewrite_command("pnpm list", &[]), - Some("rtk pnpm list".into()) + rewrite_command(format!("npx svgo").as_str(), &[]), + Some(format!("rtk npx svgo")), ); } - // --- Compound operator edge cases --- #[test] diff --git a/src/discover/rules.rs b/src/discover/rules.rs index b315edd77..304493307 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -44,7 +44,7 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[("fmt", RtkStatus::Passthrough)], }, RtkRule { - pattern: r"^pnpm\s+(list|ls|outdated|install)", + pattern: r"^pnpm\s+(exec|i|install|list|ls|outdated|run|run-script)", rtk_cmd: "rtk pnpm", rewrite_prefixes: &["pnpm"], category: "PackageManager", @@ -53,7 +53,16 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^npm\s+(run|exec)", + pattern: r"^(pnpm\s+dlx|pnpx)\s+", + rtk_cmd: "rtk pnpx", + rewrite_prefixes: &["pnpm dlx", "pnpx"], + category: "PackageManager", + savings_pct: 80.0, + subcmd_savings: &[], + subcmd_status: &[], + }, + RtkRule { + pattern: r"^npm\s+(exec|run|run-script|rum|urn|x)\s+", rtk_cmd: "rtk npm", rewrite_prefixes: &["npm"], category: "PackageManager", @@ -107,24 +116,75 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^(npx\s+|pnpm\s+)?tsc(\s|$)", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?tsc(\s|$)", rtk_cmd: "rtk tsc", - rewrite_prefixes: &["pnpm tsc", "npx tsc", "tsc"], + rewrite_prefixes: &[ + "npm exec tsc", + "npm rum tsc", + "npm run tsc", + "npm run-script tsc", + "npm tsc", + "npm urn tsc", + "npm x tsc", + "npx tsc", + "pnpm dlx tsc", + "pnpm exec tsc", + "pnpm run tsc", + "pnpm run-script tsc", + "pnpm tsc", + "pnpx tsc", + "tsc", + ], category: "Build", savings_pct: 83.0, subcmd_savings: &[], subcmd_status: &[], }, RtkRule { - pattern: 
r"^(npx\s+|pnpm\s+)?(eslint|biome|lint)(\s|$)", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?(biome|eslint|lint)(\s|$)", rtk_cmd: "rtk lint", rewrite_prefixes: &[ - "npx eslint", - "pnpm lint", - "npx biome", - "eslint", "biome", + "eslint", "lint", + "npm biome", + "npm eslint", + "npm exec biome", + "npm exec eslint", + "npm lint", + "npm rum biome", + "npm rum eslint", + "npm rum lint", + "npm run biome", + "npm run eslint", + "npm run lint", + "npm run-script biome", + "npm run-script eslint", + "npm run-script lint", + "npm urn biome", + "npm urn eslint", + "npm urn lint", + "npm x biome", + "npm x eslint", + "npx biome", + "npx eslint", + "npx lint", + "pnpm biome", + "pnpm dlx biome", + "pnpm dlx eslint", + "pnpm eslint", + "pnpm exec biome", + "pnpm exec eslint", + "pnpm lint", + "pnpm run biome", + "pnpm run eslint", + "pnpm run lint", + "pnpm run-script biome", + "pnpm run-script eslint", + "pnpm run-script lint", + "pnpx biome", + "pnpx eslint", + "pnpx lint", ], category: "Build", savings_pct: 84.0, @@ -132,45 +192,146 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^(npx\s+|pnpm\s+)?prettier", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?prettier", rtk_cmd: "rtk prettier", - rewrite_prefixes: &["npx prettier", "pnpm prettier", "prettier"], + rewrite_prefixes: &[ + "npm exec prettier", + "npm prettier", + "npm rum prettier", + "npm run prettier", + "npm run-script prettier", + "npm urn prettier", + "npm x prettier", + "npx prettier", + "pnpm dlx prettier", + "pnpm exec prettier", + "pnpm prettier", + "pnpm run prettier", + "pnpm run-script prettier", + "pnpx prettier", + "prettier", + ], category: "Build", savings_pct: 70.0, subcmd_savings: &[], subcmd_status: &[], }, RtkRule { - pattern: r"^(npx\s+|pnpm\s+)?next\s+build", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?next\s+build", 
rtk_cmd: "rtk next", - rewrite_prefixes: &["npx next build", "pnpm next build", "next build"], + rewrite_prefixes: &[ + "next build", + "npm exec next build", + "npm next build", + "npm rum next build", + "npm run next build", + "npm run-script next build", + "npm urn next build", + "npm x next build", + "npx next build", + "pnpm dlx next build", + "pnpm exec next build", + "pnpm next build", + "pnpm run next build", + "pnpm run-script next build", + "pnpx next build", + ], category: "Build", savings_pct: 87.0, subcmd_savings: &[], subcmd_status: &[], }, RtkRule { - pattern: r"^(pnpm\s+|npx\s+)?(vitest|jest|test)(\s|$)", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?(jest|t|tst|test|vitest)(\s|$)", rtk_cmd: "rtk vitest", - rewrite_prefixes: &["pnpm vitest", "npx vitest", "vitest", "jest"], + rewrite_prefixes: &[ + "jest", + "npm exec jest", + "npm exec vitest", + "npm jest", + "npm rum jest", + "npm rum vitest", + "npm run jest", + "npm run vitest", + "npm run-script jest", + "npm run-script vitest", + "npm t", + "npm test", + "npm tst", + "npm urn jest", + "npm urn vitest", + "npm vitest", + "npm x jest", + "npm x vitest", + "npx jest", + "npx vitest", + "pnpm dlx jest", + "pnpm dlx vitest", + "pnpm exec jest", + "pnpm exec vitest", + "pnpm jest", + "pnpm run jest", + "pnpm run vitest", + "pnpm run-script jest", + "pnpm run-script vitest", + "pnpm t", + "pnpm test", + "pnpm tst", + "pnpm vitest", + "pnpx jest", + "pnpx vitest", + "vitest", + ], category: "Tests", savings_pct: 99.0, subcmd_savings: &[], subcmd_status: &[], }, RtkRule { - pattern: r"^(npx\s+|pnpm\s+)?playwright", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?playwright", rtk_cmd: "rtk playwright", - rewrite_prefixes: &["npx playwright", "pnpm playwright", "playwright"], + rewrite_prefixes: &[ + "npm exec playwright", + "npm playwright", + "npm rum playwright", + "npm run playwright", + "npm run-script 
playwright", + "npm urn playwright", + "npm x playwright", + "npx playwright", + "playwright", + "pnpm dlx playwright", + "pnpm exec playwright", + "pnpm playwright", + "pnpm run playwright", + "pnpm run-script playwright", + "pnpx playwright", + ], category: "Tests", savings_pct: 94.0, subcmd_savings: &[], subcmd_status: &[], }, RtkRule { - pattern: r"^(npx\s+|pnpm\s+)?prisma", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?prisma", rtk_cmd: "rtk prisma", - rewrite_prefixes: &["npx prisma", "pnpm prisma", "prisma"], + rewrite_prefixes: &[ + "npm exec prisma", + "npm prisma", + "npm rum prisma", + "npm run prisma", + "npm run-script prisma", + "npm urn prisma", + "npm x prisma", + "npx prisma", + "pnpm dlx prisma", + "pnpm exec prisma", + "pnpm prisma", + "pnpm run prisma", + "pnpm run-script prisma", + "pnpx prisma", + "prisma", + ], category: "Build", savings_pct: 88.0, subcmd_savings: &[], From 325a42e09fd8de6d0c26914ff29ab11d8680f354 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Mon, 9 Mar 2026 01:17:43 +0100 Subject: [PATCH 133/204] fix: remove pnpx -> rtk pnpx rule as rtk pnpx command doesn't exist Signed-off-by: Nicolas Le Cam --- src/discover/registry.rs | 13 ------------- src/discover/rules.rs | 9 --------- 2 files changed, 22 deletions(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 5485b14e6..58acab411 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -2306,19 +2306,6 @@ mod tests { } } - #[test] - fn test_rewrite_pnpx() { - let commands = vec!["pnpm dlx", "pnpx"]; - for command in commands { - assert_eq!( - rewrite_command(format!("{command} svgo").as_str(), &[]), - Some(format!("rtk pnpx svgo")), - "Failed for command: {}", - command - ); - } - } - #[test] fn test_rewrite_npx() { assert_eq!( diff --git a/src/discover/rules.rs b/src/discover/rules.rs index 304493307..c63ce0bc4 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -52,15 +52,6 @@ 
pub const RULES: &[RtkRule] = &[ subcmd_savings: &[], subcmd_status: &[], }, - RtkRule { - pattern: r"^(pnpm\s+dlx|pnpx)\s+", - rtk_cmd: "rtk pnpx", - rewrite_prefixes: &["pnpm dlx", "pnpx"], - category: "PackageManager", - savings_pct: 80.0, - subcmd_savings: &[], - subcmd_status: &[], - }, RtkRule { pattern: r"^npm\s+(exec|run|run-script|rum|urn|x)\s+", rtk_cmd: "rtk npm", From f93613881939bc278261e9143bd328c82c696a33 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Thu, 12 Mar 2026 23:59:23 +0100 Subject: [PATCH 134/204] feat(pnpm): handle pnpm build rewrite Signed-off-by: Nicolas Le Cam --- src/discover/registry.rs | 1 + src/discover/rules.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 58acab411..31614ddca 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -2287,6 +2287,7 @@ mod tests { #[test] fn test_rewrite_pnpm_command() { let commands = vec![ + "build", "exec", "i", "install", diff --git a/src/discover/rules.rs b/src/discover/rules.rs index c63ce0bc4..b3d72f990 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -44,7 +44,7 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[("fmt", RtkStatus::Passthrough)], }, RtkRule { - pattern: r"^pnpm\s+(exec|i|install|list|ls|outdated|run|run-script)", + pattern: r"^pnpm\s+(build|exec|i|install|list|ls|outdated|run|run-script)", rtk_cmd: "rtk pnpm", rewrite_prefixes: &["pnpm"], category: "PackageManager", From c47edac35db8ca785cbf5f44be1e3921afda5b93 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Sun, 12 Apr 2026 00:21:02 +0200 Subject: [PATCH 135/204] chore: fix clippy warning --- src/discover/registry.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 31614ddca..165756a93 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -2310,8 +2310,8 @@ mod tests { #[test] fn test_rewrite_npx() { 
assert_eq!( - rewrite_command(format!("npx svgo").as_str(), &[]), - Some(format!("rtk npx svgo")), + rewrite_command("npx svgo", &[]), + Some("rtk npx svgo".to_string()), ); } // --- Compound operator edge cases --- From 45938b2a4d3fe76685a6e008f210bb276df50319 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Sun, 12 Apr 2026 01:49:06 +0200 Subject: [PATCH 136/204] feat(js): distinguish between `jest` and `vitest` and don't rewrite `npm test` commands as we don't know which test framework is used under the hood Signed-off-by: Nicolas Le Cam --- .claude/hooks/rtk-suggest.sh | 6 +- INSTALL.md | 19 ++-- README.md | 7 +- README_es.md | 6 +- README_fr.md | 5 +- README_ja.md | 5 +- README_ko.md | 5 +- README_zh.md | 5 +- docs/guide/analytics/gain.md | 3 +- docs/guide/what-rtk-covers.md | 3 +- docs/usage/AUDIT_GUIDE.md | 3 +- docs/usage/FEATURES.md | 8 +- hooks/claude/test-rtk-rewrite.sh | 68 ++++++------ scripts/benchmark.sh | 2 +- src/cmds/js/vitest_cmd.rs | 50 ++++++--- src/discover/registry.rs | 173 +++++++++++++++++++++++-------- src/discover/rules.rs | 74 +++++++++---- src/hooks/init.rs | 9 +- src/main.rs | 56 +++++----- 19 files changed, 324 insertions(+), 183 deletions(-) diff --git a/.claude/hooks/rtk-suggest.sh b/.claude/hooks/rtk-suggest.sh index 34fb50f3b..80c356582 100755 --- a/.claude/hooks/rtk-suggest.sh +++ b/.claude/hooks/rtk-suggest.sh @@ -97,10 +97,8 @@ elif echo "$FIRST_CMD" | grep -qE '^head\s+'; then fi # --- JS/TS tooling --- -elif echo "$FIRST_CMD" | grep -qE '^(pnpm\s+)?vitest(\s|$)'; then - SUGGESTION="rtk vitest run" -elif echo "$FIRST_CMD" | grep -qE '^pnpm\s+test(\s|$)'; then - SUGGESTION="rtk vitest run" +elif echo "$FIRST_CMD" | grep -qE '^(pnpm\s+)?vitest(\s+run)?(\s|$)'; then + SUGGESTION="rtk vitest" elif echo "$FIRST_CMD" | grep -qE '^pnpm\s+tsc(\s|$)'; then SUGGESTION="rtk tsc" elif echo "$FIRST_CMD" | grep -qE '^(npx\s+)?tsc(\s|$)'; then diff --git a/INSTALL.md b/INSTALL.md index 98457d09a..f439d4e74 100644 --- a/INSTALL.md +++ 
b/INSTALL.md @@ -231,11 +231,11 @@ rtk ls . # Test with git rtk git status -# Test with pnpm (fork only) +# Test with pnpm rtk pnpm list -# Test with Vitest (feat/vitest-support branch only) -rtk vitest run +# Test with Vitest +rtk vitest ``` ## Uninstalling @@ -303,8 +303,15 @@ rtk pnpm install pkg # Silent installation ### Tests ```bash -rtk test cargo test # Failures only (-90%) -rtk vitest run # Filtered Vitest output (-99.6%) +rtk cargo test # Filtered Cargo test output (-90%) +rtk go test # Filtered Go tests (NDJSON, -90%) +rtk jest # Filtered Jest output (-99.6%) +rtk vitest # Filtered Vitest output (-99.6%) +rtk playwright test # Filtered Playwright output (-94%) +rtk pytest # Filtered Python tests (-90%) +rtk rake test # Filtered Ruby tests (-90%) +rtk rspec # Filtered RSpec tests (-60%) +rtk test # Generic test wrapper - failures only (-90%) ``` ### Statistics @@ -319,7 +326,7 @@ rtk gain --history # With command history ### Production T3 Stack Project | Operation | Standard | RTK | Reduction | |-----------|----------|-----|-----------| -| `vitest run` | 102,199 chars | 377 chars | **-99.6%** | +| `vitest` | 102,199 chars | 377 chars | **-99.6%** | | `git status` | 529 chars | 217 chars | **-59%** | | `pnpm list` | ~8,000 tokens | ~2,400 | **-70%** | | `pnpm outdated` | ~12,000 tokens | ~1,200-2,400 | **-80-90%** | diff --git a/README.md b/README.md index 9772104c3..2c07c33d0 100644 --- a/README.md +++ b/README.md @@ -169,15 +169,16 @@ rtk gh run list # Workflow run status ### Test Runners ```bash -rtk test cargo test # Show failures only (-90%) -rtk err npm run build # Errors/warnings only -rtk vitest run # Vitest compact (failures only) +rtk jest # Jest compact (failures only) +rtk vitest # Vitest compact (failures only) rtk playwright test # E2E results (failures only) rtk pytest # Python tests (-90%) rtk go test # Go tests (NDJSON, -90%) rtk cargo test # Cargo tests (-90%) rtk rake test # Ruby minitest (-90%) rtk rspec # RSpec tests (JSON, -60%+) +rtk 
err # Filter errors only from any command +rtk test # Generic test wrapper - failures only (-90%) ``` ### Build & Lint diff --git a/README_es.md b/README_es.md index 27237ace1..751bd1ba8 100644 --- a/README_es.md +++ b/README_es.md @@ -121,10 +121,12 @@ rtk git push # -> "ok main" ### Tests ```bash -rtk test cargo test # Solo fallos (-90%) -rtk vitest run # Vitest compacto +rtk jest # Jest compacto +rtk vitest # Vitest compacto rtk pytest # Tests Python (-90%) rtk go test # Tests Go (-90%) +rtk cargo test # Tests Rust (-90%) +rtk test # Solo fallos (-90%) ``` ### Build & Lint diff --git a/README_fr.md b/README_fr.md index a29f52e55..d305feaaf 100644 --- a/README_fr.md +++ b/README_fr.md @@ -135,11 +135,12 @@ rtk git push # -> "ok main" ### Tests ```bash -rtk test cargo test # Echecs uniquement (-90%) -rtk vitest run # Vitest compact +rtk jest # Jest compact +rtk vitest # Vitest compact rtk pytest # Tests Python (-90%) rtk go test # Tests Go (-90%) rtk cargo test # Tests Cargo (-90%) +rtk test # Echecs uniquement (-90%) ``` ### Build & Lint diff --git a/README_ja.md b/README_ja.md index e0db09809..23bf4412f 100644 --- a/README_ja.md +++ b/README_ja.md @@ -121,10 +121,11 @@ rtk git push # -> "ok main" ### テスト ```bash -rtk test cargo test # 失敗のみ表示(-90%) -rtk vitest run # Vitest コンパクト +rtk jest # Jest コンパクト +rtk vitest # Vitest コンパクト rtk pytest # Python テスト(-90%) rtk go test # Go テスト(-90%) +rtk test # 失敗のみ表示(-90%) ``` ### ビルド & リント diff --git a/README_ko.md b/README_ko.md index abc35a14a..a07a4590a 100644 --- a/README_ko.md +++ b/README_ko.md @@ -121,10 +121,11 @@ rtk git push # -> "ok main" ### 테스트 ```bash -rtk test cargo test # 실패만 표시 (-90%) -rtk vitest run # Vitest 컴팩트 +rtk jest # Jest 컴팩트 +rtk vitest # Vitest 컴팩트 rtk pytest # Python 테스트 (-90%) rtk go test # Go 테스트 (-90%) +rtk test # 실패만 표시 (-90%) ``` ### 빌드 & 린트 diff --git a/README_zh.md b/README_zh.md index c4a358d77..854ca2314 100644 --- a/README_zh.md +++ b/README_zh.md @@ -122,10 +122,11 @@ rtk git push # -> 
"ok main" ### 测试 ```bash -rtk test cargo test # 仅显示失败(-90%) -rtk vitest run # Vitest 紧凑输出 +rtk jest # Jest 紧凑输出 +rtk vitest # Vitest 紧凑输出 rtk pytest # Python 测试(-90%) rtk go test # Go 测试(-90%) +rtk test # 仅显示失败(-90%) ``` ### 构建 & 检查 diff --git a/docs/guide/analytics/gain.md b/docs/guide/analytics/gain.md index db2249d4b..9b257e3e5 100644 --- a/docs/guide/analytics/gain.md +++ b/docs/guide/analytics/gain.md @@ -94,7 +94,8 @@ Same columns as daily, aggregated by Sunday-Saturday week or calendar month. |---------|----------------|-----------| | `git status` | 77-93% | Compact stat format | | `eslint` | 84% | Group by rule | -| `vitest run` | 94-99% | Show failures only | +| `jest` | 94-99% | Show failures only | +| `vitest` | 94-99% | Show failures only | | `find` | 75% | Tree format | | `pnpm list` | 70-90% | Compact dependencies | | `grep` | 70% | Truncate + group | diff --git a/docs/guide/what-rtk-covers.md b/docs/guide/what-rtk-covers.md index 426e91a97..de20182f2 100644 --- a/docs/guide/what-rtk-covers.md +++ b/docs/guide/what-rtk-covers.md @@ -51,7 +51,8 @@ Once RTK is installed with a hook, these commands are automatically intercepted | Command | Savings | What changes | |---------|---------|--------------| -| `vitest run` | 94-99% | Failures only | +| `jest` | 94-99% | Failures only | +| `vitest` | 94-99% | Failures only | | `tsc` | 75% | Type errors grouped by file | | `eslint` | 84% | Violations grouped by rule | | `pnpm list` | 70-90% | Compact dependency tree | diff --git a/docs/usage/AUDIT_GUIDE.md b/docs/usage/AUDIT_GUIDE.md index 4ce6fecec..c653fc08a 100644 --- a/docs/usage/AUDIT_GUIDE.md +++ b/docs/usage/AUDIT_GUIDE.md @@ -267,7 +267,8 @@ Savings % = (Saved / Input) × 100 |---------|----------------|-----------| | `rtk git status` | 77-93% | Compact stat format | | `rtk eslint` | 84% | Group by rule | -| `rtk vitest run` | 94-99% | Show failures only | +| `rtk jest` | 94-99% | Show failures only | +| `rtk vitest` | 94-99% | Show failures only | | `rtk 
find` | 75% | Tree format | | `rtk pnpm list` | 70-90% | Compact dependencies | | `rtk grep` | 70% | Truncate + group | diff --git a/docs/usage/FEATURES.md b/docs/usage/FEATURES.md index 061a604a9..3c285cb40 100644 --- a/docs/usage/FEATURES.md +++ b/docs/usage/FEATURES.md @@ -576,12 +576,13 @@ Filtre la sortie de `cargo nextest` pour n'afficher que les echecs. --- -### `rtk vitest run` -- Tests Vitest +### `rtk jest` / `rtk vitest` -- Tests Jest/Vitest **Economies :** ~99.5% ```bash -rtk vitest run [args...] +rtk jest [args...] +rtk vitest [args...] ``` --- @@ -1258,7 +1259,8 @@ rtk verify | `ls` | `rtk ls` | | `tree` | `rtk tree` | | `wc` | `rtk wc` | -| `vitest/jest` | `rtk vitest run` | +| `jest` | `rtk jest` | +| `vitest` | `rtk vitest` | | `tsc` | `rtk tsc` | | `eslint/biome` | `rtk lint` | | `prettier` | `rtk prettier` | diff --git a/hooks/claude/test-rtk-rewrite.sh b/hooks/claude/test-rtk-rewrite.sh index 85103163b..702fe9299 100644 --- a/hooks/claude/test-rtk-rewrite.sh +++ b/hooks/claude/test-rtk-rewrite.sh @@ -117,6 +117,10 @@ test_rewrite "npx prisma migrate" \ "npx prisma migrate" \ "rtk prisma migrate" +test_rewrite "rtk git status" \ + "rtk git status" \ + "rtk git status" + echo "" # ---- SECTION 2: Env var prefix handling (THE BIG FIX) ---- @@ -134,8 +138,8 @@ test_rewrite "env + git log" \ "GIT_PAGER=cat rtk git log --oneline -10" test_rewrite "multi env + vitest" \ - "NODE_ENV=test CI=1 npx vitest run" \ - "NODE_ENV=test CI=1 rtk vitest run" + "NODE_ENV=test CI=1 npx vitest" \ + "NODE_ENV=test CI=1 rtk vitest" test_rewrite "env + ls" \ "LANG=C ls -la" \ @@ -143,7 +147,7 @@ test_rewrite "env + ls" \ test_rewrite "env + npm run" \ "NODE_ENV=test npm run test:e2e" \ - "NODE_ENV=test rtk npm test:e2e" + "NODE_ENV=test rtk npm run test:e2e" test_rewrite "env + docker compose (unsupported subcommand, NOT rewritten)" \ "COMPOSE_PROJECT_NAME=test docker compose up -d" \ @@ -159,23 +163,15 @@ echo "" echo "--- New patterns ---" test_rewrite "npm run 
test:e2e" \ "npm run test:e2e" \ - "rtk npm test:e2e" + "rtk npm run test:e2e" test_rewrite "npm run build" \ "npm run build" \ - "rtk npm build" + "rtk npm run build" -test_rewrite "npm test" \ - "npm test" \ - "rtk npm test" - -test_rewrite "vue-tsc -b" \ - "vue-tsc -b" \ - "rtk tsc -b" - -test_rewrite "npx vue-tsc --noEmit" \ - "npx vue-tsc --noEmit" \ - "rtk tsc --noEmit" +test_rewrite "npm jest run" \ + "npm jest run" \ + "rtk jest" test_rewrite "docker compose up -d (NOT rewritten — unsupported by rtk)" \ "docker compose up -d" \ @@ -209,17 +205,17 @@ test_rewrite "docker exec -it db psql" \ "docker exec -it db psql" \ "rtk docker exec -it db psql" -test_rewrite "find (NOT rewritten — different arg format)" \ +test_rewrite "find . -name '*.ts'" \ "find . -name '*.ts'" \ - "" + "rtk find . -name '*.ts'" -test_rewrite "tree (NOT rewritten — different arg format)" \ +test_rewrite "tree src/" \ "tree src/" \ - "" + "rtk tree src/" -test_rewrite "wget (NOT rewritten — different arg format)" \ +test_rewrite "wget https://example.com/file" \ "wget https://example.com/file" \ - "" + "rtk wget https://example.com/file" test_rewrite "gh api repos/owner/repo" \ "gh api repos/owner/repo" \ @@ -281,32 +277,28 @@ echo "" echo "--- Vitest run dedup ---" test_rewrite "vitest (no args)" \ "vitest" \ - "rtk vitest run" + "rtk vitest" -test_rewrite "vitest run (no double run)" \ +test_rewrite "vitest run (no run)" \ "vitest run" \ - "rtk vitest run" + "rtk vitest" -test_rewrite "vitest run --reporter" \ - "vitest run --reporter=verbose" \ - "rtk vitest run --reporter=verbose" +test_rewrite "vitest --reporter" \ + "vitest --reporter=verbose" \ + "rtk vitest --reporter=verbose" -test_rewrite "npx vitest run" \ - "npx vitest run" \ - "rtk vitest run" +test_rewrite "npx vitest" \ + "npx vitest" \ + "rtk vitest" -test_rewrite "pnpm vitest run --coverage" \ - "pnpm vitest run --coverage" \ - "rtk vitest run --coverage" +test_rewrite "pnpm vitest --coverage" \ + "pnpm vitest 
--coverage" \ + "rtk vitest --coverage" echo "" # ---- SECTION 5: Should NOT rewrite ---- echo "--- Should NOT rewrite ---" -test_rewrite "already rtk" \ - "rtk git status" \ - "" - test_rewrite "heredoc" \ "cat <<'EOF' hello diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index a1e616bcc..15aee89df 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -367,7 +367,7 @@ if [ -f "package.json" ]; then fi if command -v vitest &> /dev/null || [ -f "node_modules/.bin/vitest" ]; then - bench "vitest run" "vitest run --reporter=json 2>&1 || true" "$RTK vitest run" + bench "vitest" "vitest run --reporter=json 2>&1 || true" "$RTK vitest" fi if command -v pnpm &> /dev/null; then diff --git a/src/cmds/js/vitest_cmd.rs b/src/cmds/js/vitest_cmd.rs index 4f5e9ae47..260a3cdb7 100644 --- a/src/cmds/js/vitest_cmd.rs +++ b/src/cmds/js/vitest_cmd.rs @@ -10,6 +10,7 @@ use crate::parser::{ emit_degradation_warning, emit_passthrough_warning, extract_json_object, truncate_passthrough, FormatMode, OutputParser, ParseResult, TestFailure, TestResult, TokenFormatter, }; +use crate::Commands; /// Vitest JSON output structures (tool-specific format) #[derive(Debug, Deserialize)] @@ -210,31 +211,39 @@ fn extract_failures_regex(output: &str) -> Vec { failures } -#[derive(Debug, Clone)] -pub enum VitestCommand { - Run, -} - -pub fn run(cmd: VitestCommand, args: &[String], verbose: u8) -> Result { - match cmd { - VitestCommand::Run => run_vitest(args, verbose), +impl Commands { + fn test_framework_name(&self) -> Result<&'static str, &Commands> { + match self { + Commands::Jest { .. } => Ok("jest"), + Commands::Vitest { .. 
} => Ok("vitest"), + unknown => Err(unknown), + } } } -fn run_vitest(args: &[String], verbose: u8) -> Result { +pub fn run_test(command: &Commands, args: &[String], verbose: u8) -> Result { let timer = tracking::TimedExecution::start(); - let mut cmd = package_manager_exec("vitest"); + let framework = command + .test_framework_name() + .expect("Unknown test framework"); + + let mut cmd = package_manager_exec(framework); cmd.arg("run"); // Force non-watch mode // Add JSON reporter for structured output cmd.arg("--reporter=json"); for arg in args { + if arg == "run" || arg.starts_with("--reporter") { + continue; + } cmd.arg(arg); } - let output = cmd.output().context("Failed to run vitest")?; + let output = cmd + .output() + .context(format!("Failed to run {}", framework))?; let stdout = String::from_utf8_lossy(&output.stdout); let stderr = String::from_utf8_lossy(&output.stderr); let combined = format!("{}{}", stdout, stderr); @@ -246,30 +255,37 @@ fn run_vitest(args: &[String], verbose: u8) -> Result { let filtered = match parse_result { ParseResult::Full(data) => { if verbose > 0 { - eprintln!("vitest run (Tier 1: Full JSON parse)"); + eprintln!("{} run (Tier 1: Full JSON parse)", framework); } data.format(mode) } ParseResult::Degraded(data, warnings) => { if verbose > 0 { - emit_degradation_warning("vitest", &warnings.join(", ")); + emit_degradation_warning(framework, &warnings.join(", ")); } data.format(mode) } ParseResult::Passthrough(raw) => { - emit_passthrough_warning("vitest", "All parsing tiers failed"); + emit_passthrough_warning(framework, "All parsing tiers failed"); raw } }; - let exit_code = crate::core::utils::exit_code_from_output(&output, "vitest"); - if let Some(hint) = crate::core::tee::tee_and_hint(&combined, "vitest_run", exit_code) { + let exit_code = crate::core::utils::exit_code_from_output(&output, framework); + if let Some(hint) = + crate::core::tee::tee_and_hint(&combined, format!("{}_run", framework).as_str(), exit_code) + { 
println!("{}\n{}", filtered, hint); } else { println!("{}", filtered); } - timer.track("vitest run", "rtk vitest run", &combined, &filtered); + timer.track( + format!("{} run", framework).as_str(), + format!("rtk {} run", framework).as_str(), + &combined, + &filtered, + ); if !output.status.success() { return Ok(exit_code); diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 165756a93..1cb054160 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -2088,43 +2088,130 @@ mod tests { } #[test] - fn test_classify_vitest() { + fn test_classify_jest() { let commands = vec![ + "jest run", + "jest", + "npm exec jest run", "npm exec jest", - "npm exec vitest", + "npm jest run", + "npm jest", + "npm rum jest run", "npm rum jest", - "npm rum vitest", + "npm run jest run", "npm run jest", - "npm run vitest", + "npm run-script jest run", "npm run-script jest", - "npm run-script vitest", + "npm urn jest run", "npm urn jest", - "npm urn vitest", + "npm x jest run", "npm x jest", - "npm x vitest", + "npx jest run", + "npx jest", + "pnpm dlx jest run", "pnpm dlx jest", - "pnpm dlx vitest", + "pnpm exec jest run", "pnpm exec jest", - "pnpm exec vitest", + "pnpm jest run", + "pnpm jest", + "pnpm run jest run", "pnpm run jest", - "pnpm run vitest", + "pnpm run-script jest run", "pnpm run-script jest", - "pnpm run-script vitest", + "pnpx jest run", + "pnpx jest", + ]; + for command in commands { + assert!( + matches!( + classify_command(command), + Classification::Supported { + rtk_equivalent: "rtk jest", + .. 
+ } + ), + "Failed for command: {}", + command + ); + } + } + + #[test] + fn test_rewrite_jest() { + let commands = vec![ + "jest run", + "jest", + "npm exec jest run", + "npm exec jest", + "npm jest run", "npm jest", - "npm t", - "npm test", - "npm tst", - "npm vitest", + "npm rum jest run", + "npm rum jest", + "npm run jest run", + "npm run jest", + "npm run-script jest run", + "npm run-script jest", + "npm urn jest run", + "npm urn jest", + "npm x jest run", + "npm x jest", + "npx jest run", "npx jest", - "npx vitest", + "pnpm dlx jest run", + "pnpm dlx jest", + "pnpm exec jest run", + "pnpm exec jest", + "pnpm jest run", "pnpm jest", - "pnpm t", - "pnpm test", - "pnpm tst", - "pnpm vitest", + "pnpm run jest run", + "pnpm run jest", + "pnpm run-script jest run", + "pnpm run-script jest", + "pnpx jest run", "pnpx jest", + ]; + for command in commands { + assert_eq!( + rewrite_command(command, &[]), + Some("rtk jest".into()), + "Failed for command: {}", + command + ); + } + } + + #[test] + fn test_classify_vitest() { + let commands = vec![ + "npm exec vitest run", + "npm exec vitest", + "npm rum vitest run", + "npm rum vitest", + "npm run vitest run", + "npm run vitest", + "npm run-script vitest run", + "npm run-script vitest", + "npm urn vitest run", + "npm urn vitest", + "npm vitest run", + "npm vitest", + "npm x vitest run", + "npm x vitest", + "npx vitest run", + "npx vitest", + "pnpm dlx vitest run", + "pnpm dlx vitest", + "pnpm exec vitest run", + "pnpm exec vitest", + "pnpm run vitest run", + "pnpm run vitest", + "pnpm run-script vitest run", + "pnpm run-script vitest", + "pnpm vitest run", + "pnpm vitest", + "pnpx vitest run", "pnpx vitest", - "jest", + "vitest run", "vitest", ]; for command in commands { @@ -2145,47 +2232,41 @@ mod tests { #[test] fn test_rewrite_vitest() { let commands = vec![ - "npm exec jest", + "npm exec vitest run", "npm exec vitest", - "npm rum jest", + "npm rum vitest run", "npm rum vitest", - "npm run jest", + "npm run vitest 
run", "npm run vitest", - "npm run-script jest", + "npm run-script vitest run", "npm run-script vitest", - "npm urn jest", + "npm urn vitest run", "npm urn vitest", - "npm x jest", + "npm vitest run", + "npm vitest", + "npm x vitest run", "npm x vitest", - "pnpm dlx jest", + "npx vitest run", + "npx vitest", + "pnpm dlx vitest run", "pnpm dlx vitest", - "pnpm exec jest", + "pnpm exec vitest run", "pnpm exec vitest", - "pnpm run jest", + "pnpm run vitest run", "pnpm run vitest", - "pnpm run-script jest", + "pnpm run-script vitest run", "pnpm run-script vitest", - "npm jest", - "npm t", - "npm test", - "npm tst", - "npm vitest", - "npx jest", - "npx vitest", - "pnpm jest", - "pnpm t", - "pnpm test", - "pnpm tst", + "pnpm vitest run", "pnpm vitest", - "pnpx jest", + "pnpx vitest run", "pnpx vitest", - "jest", + "vitest run", "vitest", ]; for command in commands { assert_eq!( - rewrite_command(&format!("{command} run"), &[]), - Some("rtk vitest run".into()), + rewrite_command(command, &[]), + Some("rtk vitest".into()), "Failed for command: {}", command ); diff --git a/src/discover/rules.rs b/src/discover/rules.rs index b3d72f990..409e9adea 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -233,44 +233,78 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?(jest|t|tst|test|vitest)(\s|$)", - rtk_cmd: "rtk vitest", + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?jest(\s+run)?(\s|$)", + rtk_cmd: "rtk jest", rewrite_prefixes: &[ + "jest run", "jest", + "npm exec jest run", "npm exec jest", - "npm exec vitest", + "npm jest run", "npm jest", + "npm rum jest run", "npm rum jest", - "npm rum vitest", + "npm run jest run", "npm run jest", - "npm run vitest", + "npm run-script jest run", "npm run-script jest", - "npm run-script vitest", - "npm t", - "npm test", - "npm tst", + "npm urn jest run", "npm urn jest", - "npm 
urn vitest", - "npm vitest", + "npm x jest run", "npm x jest", - "npm x vitest", + "npx jest run", "npx jest", - "npx vitest", + "pnpm dlx jest run", "pnpm dlx jest", - "pnpm dlx vitest", + "pnpm exec jest run", "pnpm exec jest", - "pnpm exec vitest", + "pnpm jest run", "pnpm jest", + "pnpm run jest run", "pnpm run jest", - "pnpm run vitest", + "pnpm run-script jest run", "pnpm run-script jest", + "pnpx jest run", + "pnpx jest", + ], + category: "Tests", + savings_pct: 99.0, + subcmd_savings: &[], + subcmd_status: &[], + }, + RtkRule { + pattern: r"^((p?np(m|x)|p?npm\s+(exec|run|run-script)|npm\s+(rum|urn|x)|pnpm\s+dlx)\s+)?vitest(\s+run)?(\s|$)", + rtk_cmd: "rtk vitest", + rewrite_prefixes: &[ + "npm exec vitest run", + "npm exec vitest", + "npm rum vitest run", + "npm rum vitest", + "npm run vitest run", + "npm run vitest", + "npm run-script vitest run", + "npm run-script vitest", + "npm urn vitest run", + "npm urn vitest", + "npm vitest run", + "npm vitest", + "npm x vitest run", + "npm x vitest", + "npx vitest run", + "npx vitest", + "pnpm dlx vitest run", + "pnpm dlx vitest", + "pnpm exec vitest run", + "pnpm exec vitest", + "pnpm run vitest run", + "pnpm run vitest", + "pnpm run-script vitest run", "pnpm run-script vitest", - "pnpm t", - "pnpm test", - "pnpm tst", + "pnpm vitest run", "pnpm vitest", - "pnpx jest", + "pnpx vitest run", "pnpx vitest", + "vitest run", "vitest", ], category: "Tests", diff --git a/src/hooks/init.rs b/src/hooks/init.rs index 42a3db7ca..6edba7ca6 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -109,11 +109,16 @@ rtk prettier --check # Files needing format only (70%) rtk next build # Next.js build with route metrics (87%) ``` -### Test (90-99% savings) +### Test (60-99% savings) ```bash rtk cargo test # Cargo test failures only (90%) -rtk vitest run # Vitest failures only (99.5%) +rtk go test # Go test failures only (90%) +rtk jest # Jest failures only (99.5%) +rtk vitest # Vitest failures only (99.5%) rtk playwright test # 
Playwright failures only (94%) +rtk pytest # Python test failures only (90%) +rtk rake test # Ruby test failures only (90%) +rtk rspec # RSpec test failures only (60%) rtk test # Generic test wrapper - failures only ``` diff --git a/src/main.rs b/src/main.rs index 11332034a..9c732422f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -70,7 +70,7 @@ struct Cli { skip_env: bool, } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum Commands { /// List directory contents with token-optimized output (proxy to native ls) Ls { @@ -440,10 +440,18 @@ enum Commands { create: bool, }, + /// Jest commands with compact output + Jest { + /// Additional jest arguments + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, + }, + /// Vitest commands with compact output Vitest { - #[command(subcommand)] - command: VitestCommands, + /// Additional vitest arguments + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, }, /// Prisma commands with compact output (no ASCII art) @@ -693,7 +701,7 @@ enum Commands { }, } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum HookCommands { /// Process Gemini CLI BeforeTool hook (reads JSON from stdin) Gemini, @@ -701,7 +709,7 @@ enum HookCommands { Copilot, } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum GitCommands { /// Condensed diff output Diff { @@ -782,7 +790,7 @@ enum GitCommands { Other(Vec), } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum PnpmCommands { /// List installed packages (ultra-dense) List { @@ -818,7 +826,7 @@ enum PnpmCommands { Other(Vec), } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum DockerCommands { /// List running containers Ps, @@ -836,7 +844,7 @@ enum DockerCommands { Other(Vec), } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum ComposeCommands { /// List compose services (compact) Ps, @@ -855,7 +863,7 @@ enum ComposeCommands { Other(Vec), } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum KubectlCommands { 
/// List pods Pods { @@ -884,17 +892,7 @@ enum KubectlCommands { Other(Vec), } -#[derive(Subcommand)] -enum VitestCommands { - /// Run tests with filtered output (90% token reduction) - Run { - /// Additional vitest arguments - #[arg(trailing_var_arg = true, allow_hyphen_values = true)] - args: Vec, - }, -} - -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum PrismaCommands { /// Generate Prisma Client (strip ASCII art) Generate { @@ -915,7 +913,7 @@ enum PrismaCommands { }, } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum PrismaMigrateCommands { /// Create and apply migration Dev { @@ -940,7 +938,7 @@ enum PrismaMigrateCommands { }, } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum CargoCommands { /// Build with compact output (strip Compiling lines, keep errors) Build { @@ -983,7 +981,7 @@ enum CargoCommands { Other(Vec), } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum DotnetCommands { /// Build with compact output Build { @@ -1010,7 +1008,7 @@ enum DotnetCommands { Other(Vec), } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum GoCommands { /// Run tests with compact output (90% token reduction via JSON streaming) Test { @@ -1162,7 +1160,7 @@ fn run_fallback(parse_error: clap::Error) -> Result { } } -#[derive(Subcommand)] +#[derive(Debug, Subcommand)] enum GtCommands { /// Compact stack log output Log { @@ -1785,11 +1783,9 @@ fn run_cli() -> Result { 0 } - Commands::Vitest { command } => match command { - VitestCommands::Run { args } => { - vitest_cmd::run(vitest_cmd::VitestCommand::Run, &args, cli.verbose)? - } - }, + Commands::Jest { ref args } | Commands::Vitest { ref args } => { + vitest_cmd::run_test(&cli.command, args, cli.verbose)? 
+ } Commands::Prisma { command } => match command { PrismaCommands::Generate { args } => { From 70610da4bbf0fd8f4226fc61895af61377eafcc8 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Sun, 12 Apr 2026 21:42:45 +0200 Subject: [PATCH 137/204] fix(vitest): rework command to handle differences between vitest and jest Also remove duration computation as there's no endTime attribute in json output Signed-off-by: Nicolas Le Cam --- src/cmds/js/vitest_cmd.rs | 71 +++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/src/cmds/js/vitest_cmd.rs b/src/cmds/js/vitest_cmd.rs index 260a3cdb7..c4e089877 100644 --- a/src/cmds/js/vitest_cmd.rs +++ b/src/cmds/js/vitest_cmd.rs @@ -25,10 +25,6 @@ struct VitestJsonOutput { num_failed_tests: usize, #[serde(rename = "numPendingTests", default)] num_pending_tests: usize, - #[serde(rename = "startTime")] - start_time: Option, - #[serde(rename = "endTime")] - end_time: Option, } #[derive(Debug, Deserialize)] @@ -67,17 +63,13 @@ impl OutputParser for VitestParser { match json_result { Ok(json) => { let failures = extract_failures_from_json(&json); - let duration_ms = match (json.start_time, json.end_time) { - (Some(start), Some(end)) => Some(end.saturating_sub(start)), - _ => None, - }; let result = TestResult { total: json.num_total_tests, passed: json.num_passed_tests, failed: json.num_failed_tests, skipped: json.num_pending_tests, - duration_ms, + duration_ms: None, failures, }; @@ -211,31 +203,39 @@ fn extract_failures_regex(output: &str) -> Vec { failures } -impl Commands { - fn test_framework_name(&self) -> Result<&'static str, &Commands> { - match self { - Commands::Jest { .. } => Ok("jest"), - Commands::Vitest { .. 
} => Ok("vitest"), - unknown => Err(unknown), - } - } -} - pub fn run_test(command: &Commands, args: &[String], verbose: u8) -> Result { let timer = tracking::TimedExecution::start(); - let framework = command - .test_framework_name() - .expect("Unknown test framework"); - - let mut cmd = package_manager_exec(framework); - cmd.arg("run"); // Force non-watch mode - - // Add JSON reporter for structured output - cmd.arg("--reporter=json"); + let (framework, mut cmd) = match command { + Commands::Vitest { .. } => { + let framework = "vitest"; + let mut cmd = package_manager_exec(framework); + cmd + // Force non-watch mode + .arg("run") + // Enable JSON structured output + .arg("--reporter=json"); + (framework, cmd) + } + Commands::Jest { .. } => { + let framework = "jest"; + let mut cmd = package_manager_exec(framework); + cmd + // Force non-watch mode + .arg("--no-watch") + // Enable JSON structured output + .arg("--json"); + (framework, cmd) + } + _ => unreachable!(), + }; for arg in args { - if arg == "run" || arg.starts_with("--reporter") { + if arg == "run" + || arg.starts_with("--json") + || arg.starts_with("--reporter") + || arg.starts_with("--watch") + { continue; } cmd.arg(arg); @@ -305,8 +305,7 @@ mod tests { "numFailedTests": 0, "numPendingTests": 0, "testResults": [], - "startTime": 1000, - "endTime": 1450 + "startTime": 1000 }"#; let result = VitestParser::parse(json); @@ -317,7 +316,7 @@ mod tests { assert_eq!(data.total, 13); assert_eq!(data.passed, 13); assert_eq!(data.failed, 0); - assert_eq!(data.duration_ms, Some(450)); + assert_eq!(data.duration_ms, None); } #[test] @@ -359,7 +358,7 @@ mod tests { Scope: all 6 workspace projects WARN deprecated inflight@1.0.6: This module is not supported -{"numTotalTests": 13, "numPassedTests": 13, "numFailedTests": 0, "numPendingTests": 0, "testResults": [], "startTime": 1000, "endTime": 1450} +{"numTotalTests": 13, "numPassedTests": 13, "numFailedTests": 0, "numPendingTests": 0, "testResults": [], "startTime": 
1000} "#; let result = VitestParser::parse(input); assert_eq!(result.tier(), 1, "Should succeed with Tier 1 (full parse)"); @@ -376,7 +375,7 @@ Scope: all 6 workspace projects let input = r#"[dotenv] Loading environment variables from .env [dotenv] Injected 5 variables -{"numTotalTests": 5, "numPassedTests": 4, "numFailedTests": 1, "numPendingTests": 0, "testResults": [], "startTime": 2000, "endTime": 2300} +{"numTotalTests": 5, "numPassedTests": 4, "numFailedTests": 1, "numPendingTests": 0, "testResults": [], "startTime": 2000} "#; let result = VitestParser::parse(input); assert_eq!(result.tier(), 1, "Should succeed with Tier 1 (full parse)"); @@ -386,13 +385,13 @@ Scope: all 6 workspace projects assert_eq!(data.total, 5); assert_eq!(data.passed, 4); assert_eq!(data.failed, 1); - assert_eq!(data.duration_ms, Some(300)); + assert_eq!(data.duration_ms, None); } #[test] fn test_vitest_parser_with_nested_json() { let input = r#"prefix text -{"numTotalTests": 2, "numPassedTests": 2, "numFailedTests": 0, "numPendingTests": 0, "testResults": [{"name": "test.js", "assertionResults": [{"fullName": "nested test", "status": "passed", "failureMessages": []}]}], "startTime": 1000, "endTime": 1100} +{"numTotalTests": 2, "numPassedTests": 2, "numFailedTests": 0, "numPendingTests": 0, "testResults": [{"name": "test.js", "assertionResults": [{"fullName": "nested test", "status": "passed", "failureMessages": []}]}], "startTime": 1000} "#; let result = VitestParser::parse(input); assert_eq!(result.tier(), 1, "Should succeed with Tier 1 (full parse)"); From 010596c696469736c12ed7c05ebf8cba803ff326 Mon Sep 17 00:00:00 2001 From: Nicolas Le Cam Date: Sun, 12 Apr 2026 21:54:16 +0200 Subject: [PATCH 138/204] Revert "feat(pnpm): handle pnpm build rewrite" This reverts commit 94a35325790a8a62715e66863ad5a6cd17417d81. Build is no longer a pnpm command with specific handling. 
Signed-off-by: Nicolas Le Cam --- src/discover/registry.rs | 1 - src/discover/rules.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 1cb054160..0005cd21e 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -2368,7 +2368,6 @@ mod tests { #[test] fn test_rewrite_pnpm_command() { let commands = vec![ - "build", "exec", "i", "install", diff --git a/src/discover/rules.rs b/src/discover/rules.rs index 409e9adea..0e357d7bb 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -44,7 +44,7 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[("fmt", RtkStatus::Passthrough)], }, RtkRule { - pattern: r"^pnpm\s+(build|exec|i|install|list|ls|outdated|run|run-script)", + pattern: r"^pnpm\s+(exec|i|install|list|ls|outdated|run|run-script)", rtk_cmd: "rtk pnpm", rewrite_prefixes: &["pnpm"], category: "PackageManager", From 630f0a2800e311d7aee80a9edca22f9f50315f83 Mon Sep 17 00:00:00 2001 From: s2005 Date: Sun, 5 Apr 2026 15:28:02 +0200 Subject: [PATCH 139/204] Add liquibase TOML filter --- src/core/toml_filter.rs | 18 +++++--- src/discover/rules.rs | 9 ++++ src/filters/README.md | 1 + src/filters/liquibase.toml | 84 ++++++++++++++++++++++++++++++++++++++ src/main.rs | 36 ++++++++++++---- 5 files changed, 134 insertions(+), 14 deletions(-) create mode 100644 src/filters/liquibase.toml diff --git a/src/core/toml_filter.rs b/src/core/toml_filter.rs index bd294f8af..06060d22d 100644 --- a/src/core/toml_filter.rs +++ b/src/core/toml_filter.rs @@ -102,6 +102,10 @@ struct TomlFilterDef { tail_lines: Option, max_lines: Option, on_empty: Option, + /// When true, stderr is captured and merged with stdout before filtering. + /// Use for tools like liquibase that emit banners/logs to stderr. 
+ #[serde(default)] + filter_stderr: bool, } // --------------------------------------------------------------------------- @@ -145,6 +149,8 @@ pub struct CompiledFilter { tail_lines: Option, pub max_lines: Option, on_empty: Option, + /// When true, the runner should capture stderr and merge it with stdout. + pub filter_stderr: bool, } // --------------------------------------------------------------------------- @@ -391,6 +397,7 @@ fn compile_filter(name: String, def: TomlFilterDef) -> Result Result { if let Some(filter) = toml_match { // TOML match: capture stdout for filtering - let result = core::utils::resolved_command(&args[0]) - .args(&args[1..]) - .stdin(std::process::Stdio::inherit()) - .stdout(std::process::Stdio::piped()) // capture - .stderr(std::process::Stdio::inherit()) // stderr always direct - .output(); + let result = if filter.filter_stderr { + // Merge stderr into stdout so the filter can strip banners emitted by tools like liquibase + core::utils::resolved_command(&args[0]) + .args(&args[1..]) + .stdin(std::process::Stdio::inherit()) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) // captured for merging + .output() + } else { + core::utils::resolved_command(&args[0]) + .args(&args[1..]) + .stdin(std::process::Stdio::inherit()) + .stdout(std::process::Stdio::piped()) // capture + .stderr(std::process::Stdio::inherit()) // stderr always direct + .output() + }; match result { Ok(output) => { let exit_code = core::utils::exit_code_from_output(&output, &raw_command); let stdout_raw = String::from_utf8_lossy(&output.stdout); + let stderr_raw = String::from_utf8_lossy(&output.stderr); + // Merge stderr into the text to filter when filter_stderr is enabled; + // otherwise emit stderr directly so it is always visible. 
+ let combined_raw = if filter.filter_stderr { + format!("{}{}", stdout_raw, stderr_raw) + } else { + stdout_raw.to_string() + }; // Tee raw output BEFORE filtering on failure — lets LLM re-read if needed let tee_hint = if !output.status.success() { - core::tee::tee_and_hint(&stdout_raw, &raw_command, exit_code) + core::tee::tee_and_hint(&combined_raw, &raw_command, exit_code) } else { None }; - let filtered = core::toml_filter::apply_filter(filter, &stdout_raw); + let filtered = core::toml_filter::apply_filter(filter, &combined_raw); println!("{}", filtered); if let Some(hint) = tee_hint { println!("{}", hint); @@ -1121,7 +1139,7 @@ fn run_fallback(parse_error: clap::Error) -> Result { timer.track( &raw_command, &format!("rtk:toml {}", raw_command), - &stdout_raw, + &combined_raw, &filtered, ); core::tracking::record_parse_failure_silent(&raw_command, &error_message, true); From 4f4e4d2b5a3529030fe4089f60d2f4b8740b5d53 Mon Sep 17 00:00:00 2001 From: mgierok Date: Tue, 24 Mar 2026 18:10:34 +0100 Subject: [PATCH 140/204] fix(golangci-lint): restore run wrapper and align guidance Keep bare golangci-lint invocations as passthrough while preserving compact filtering for golangci-lint run. Update discover/rewrite rules, regression tests, and docs to advertise only the supported compact run path. 
--- src/cmds/go/golangci_cmd.rs | 236 ++++++++++++++++++++++++++++++++---- src/discover/registry.rs | 34 +++++- src/discover/rules.rs | 6 +- src/main.rs | 4 +- 4 files changed, 248 insertions(+), 32 deletions(-) diff --git a/src/cmds/go/golangci_cmd.rs b/src/cmds/go/golangci_cmd.rs index f24a9e059..6fe734002 100644 --- a/src/cmds/go/golangci_cmd.rs +++ b/src/cmds/go/golangci_cmd.rs @@ -6,6 +6,42 @@ use crate::core::utils::{resolved_command, truncate}; use anyhow::Result; use serde::Deserialize; use std::collections::HashMap; +use std::ffi::OsString; + +const GOLANGCI_SUBCOMMANDS: &[&str] = &[ + "cache", + "completion", + "config", + "custom", + "fmt", + "formatters", + "help", + "linters", + "migrate", + "run", + "version", +]; + +const GLOBAL_FLAGS_WITH_VALUE: &[&str] = &[ + "-c", + "--color", + "--config", + "--cpu-profile-path", + "--mem-profile-path", + "--trace-path", +]; + +#[derive(Debug, PartialEq, Eq)] +struct RunInvocation { + global_args: Vec, + run_args: Vec, +} + +#[derive(Debug, PartialEq, Eq)] +enum Invocation { + FilteredRun(RunInvocation), + Passthrough, +} #[derive(Debug, Deserialize)] struct Position { @@ -81,44 +117,31 @@ pub(crate) fn detect_major_version() -> u32 { } pub fn run(args: &[String], verbose: u8) -> Result { + match classify_invocation(args) { + Invocation::FilteredRun(invocation) => run_filtered(args, &invocation, verbose), + Invocation::Passthrough => run_passthrough(args, verbose), + } +} + +fn run_filtered(original_args: &[String], invocation: &RunInvocation, verbose: u8) -> Result { let version = detect_major_version(); let mut cmd = resolved_command("golangci-lint"); - - // Force JSON output (only if user hasn't specified it) - let has_format = args.iter().any(|a| { - a == "--out-format" - || a.starts_with("--out-format=") - || a == "--output.json.path" - || a.starts_with("--output.json.path=") - }); - - if !has_format { - if version >= 2 { - cmd.arg("run").arg("--output.json.path").arg("stdout"); - } else { - 
cmd.arg("run").arg("--out-format=json"); - } - } else { - cmd.arg("run"); - } - - for arg in args { + for arg in build_filtered_args(invocation, version) { cmd.arg(arg); } if verbose > 0 { - if version >= 2 { - eprintln!("Running: golangci-lint run --output.json.path stdout"); - } else { - eprintln!("Running: golangci-lint run --out-format=json"); - } + eprintln!( + "Running: {}", + format_command("golangci-lint", &build_filtered_args(invocation, version)) + ); } let exit_code = runner::run_filtered( cmd, "golangci-lint", - &args.join(" "), + &original_args.join(" "), |stdout| { // v2 outputs JSON on first line + trailing text; v1 outputs just JSON let json_output = if version >= 2 { @@ -136,6 +159,95 @@ pub fn run(args: &[String], verbose: u8) -> Result { Ok(if exit_code == 1 { 0 } else { exit_code }) } +fn run_passthrough(args: &[String], verbose: u8) -> Result { + let os_args: Vec = args.iter().map(OsString::from).collect(); + runner::run_passthrough("golangci-lint", &os_args, verbose) +} + +fn classify_invocation(args: &[String]) -> Invocation { + match find_subcommand_index(args) { + Some(idx) if args[idx] == "run" => Invocation::FilteredRun(RunInvocation { + global_args: args[..idx].to_vec(), + run_args: args[idx + 1..].to_vec(), + }), + _ => Invocation::Passthrough, + } +} + +fn find_subcommand_index(args: &[String]) -> Option { + let mut i = 0; + while i < args.len() { + let arg = args[i].as_str(); + + if arg == "--" { + return None; + } + + if !arg.starts_with('-') { + if GOLANGCI_SUBCOMMANDS.contains(&arg) { + return Some(i); + } + return None; + } + + if let Some(flag) = split_flag_name(arg) { + if GLOBAL_FLAGS_WITH_VALUE.contains(&flag) { + i += 1; + } + } + + i += 1; + } + + None +} + +fn split_flag_name(arg: &str) -> Option<&str> { + if arg.starts_with("--") { + return Some(arg.split_once('=').map(|(flag, _)| flag).unwrap_or(arg)); + } + + if arg.starts_with('-') { + return Some(arg); + } + + None +} + +fn build_filtered_args(invocation: 
&RunInvocation, version: u32) -> Vec { + let mut args = invocation.global_args.clone(); + args.push("run".to_string()); + + if !has_output_flag(&invocation.run_args) { + if version >= 2 { + args.push("--output.json.path".to_string()); + args.push("stdout".to_string()); + } else { + args.push("--out-format=json".to_string()); + } + } + + args.extend(invocation.run_args.clone()); + args +} + +fn has_output_flag(args: &[String]) -> bool { + args.iter().any(|a| { + a == "--out-format" + || a.starts_with("--out-format=") + || a == "--output.json.path" + || a.starts_with("--output.json.path=") + }) +} + +fn format_command(base: &str, args: &[String]) -> String { + if args.is_empty() { + base.to_string() + } else { + format!("{} {}", base, args.join(" ")) + } +} + /// Filter golangci-lint JSON output - group by linter and file pub(crate) fn filter_golangci_json(output: &str, version: u32) -> String { let result: Result = serde_json::from_str(output); @@ -343,6 +455,78 @@ mod tests { assert_eq!(parse_major_version("not a version string"), 1); } + #[test] + fn test_classify_invocation_run_uses_filtered_path() { + assert_eq!( + classify_invocation(&["run".into(), "./...".into()]), + Invocation::FilteredRun(RunInvocation { + global_args: vec![], + run_args: vec!["./...".into()], + }) + ); + } + + #[test] + fn test_classify_invocation_with_global_flag_value_uses_filtered_path() { + assert_eq!( + classify_invocation(&[ + "--color".into(), + "never".into(), + "run".into(), + "./...".into(), + ]), + Invocation::FilteredRun(RunInvocation { + global_args: vec!["--color".into(), "never".into()], + run_args: vec!["./...".into()], + }) + ); + } + + #[test] + fn test_classify_invocation_with_short_global_flag_uses_filtered_path() { + assert_eq!( + classify_invocation(&["-v".into(), "run".into(), "./...".into()]), + Invocation::FilteredRun(RunInvocation { + global_args: vec!["-v".into()], + run_args: vec!["./...".into()], + }) + ); + } + + #[test] + fn 
test_classify_invocation_bare_command_is_passthrough() { + assert_eq!(classify_invocation(&[]), Invocation::Passthrough); + } + + #[test] + fn test_classify_invocation_version_flag_is_passthrough() { + assert_eq!( + classify_invocation(&["--version".into()]), + Invocation::Passthrough + ); + } + + #[test] + fn test_classify_invocation_version_subcommand_is_passthrough() { + assert_eq!( + classify_invocation(&["version".into()]), + Invocation::Passthrough + ); + } + + #[test] + fn test_build_filtered_args_does_not_duplicate_run() { + let invocation = RunInvocation { + global_args: vec![], + run_args: vec!["./...".into()], + }; + + assert_eq!( + build_filtered_args(&invocation, 2), + vec!["run", "--output.json.path", "stdout", "./..."] + ); + } + #[test] fn test_filter_golangci_v2_fields_parse_cleanly() { // v2 JSON includes Severity, SourceLines, Offset — must not panic diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 401cba9ba..93a2504db 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -1877,7 +1877,29 @@ mod tests { assert!(matches!( classify_command("golangci-lint run"), Classification::Supported { - rtk_equivalent: "rtk golangci-lint", + rtk_equivalent: "rtk golangci-lint run", + .. + } + )); + } + + #[test] + fn test_classify_golangci_lint_bare_is_not_compact_wrapper() { + assert!(!matches!( + classify_command("golangci-lint"), + Classification::Supported { + rtk_equivalent: "rtk golangci-lint run", + .. + } + )); + } + + #[test] + fn test_classify_golangci_lint_other_subcommand_is_not_compact_wrapper() { + assert!(!matches!( + classify_command("golangci-lint version"), + Classification::Supported { + rtk_equivalent: "rtk golangci-lint run", .. 
} )); @@ -1915,6 +1937,16 @@ mod tests { ); } + #[test] + fn test_rewrite_bare_golangci_lint_skips_compact_wrapper() { + assert_eq!(rewrite_command("golangci-lint", &[]), None); + } + + #[test] + fn test_rewrite_other_golangci_lint_subcommand_skips_compact_wrapper() { + assert_eq!(rewrite_command("golangci-lint version", &[]), None); + } + // --- JS/TS tooling --- #[test] diff --git a/src/discover/rules.rs b/src/discover/rules.rs index b315edd77..338e5a6d3 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -276,9 +276,9 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^golangci-lint(\s|$)", - rtk_cmd: "rtk golangci-lint", - rewrite_prefixes: &["golangci-lint", "golangci"], + pattern: r"^(?:golangci-lint|golangci)\s+(run)(?:\s|$)", + rtk_cmd: "rtk golangci-lint run", + rewrite_prefixes: &["golangci-lint run", "golangci run"], category: "Go", savings_pct: 85.0, subcmd_savings: &[], diff --git a/src/main.rs b/src/main.rs index 11332034a..36d0a03c2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -656,10 +656,10 @@ enum Commands { command: GtCommands, }, - /// golangci-lint with compact output + /// golangci-lint wrapper with compact `run` support and passthrough for other invocations #[command(name = "golangci-lint")] GolangciLint { - /// golangci-lint arguments + /// Additional golangci-lint arguments #[arg(trailing_var_arg = true, allow_hyphen_values = true)] args: Vec, }, From d85303ec4893deb904260f5dc11b7df906a50c07 Mon Sep 17 00:00:00 2001 From: mgierok Date: Tue, 24 Mar 2026 19:08:34 +0100 Subject: [PATCH 141/204] fix(discover): preserve golangci-lint flags in rewrite Normalize golangci-lint global flags before run during classification and keep them in rewritten commands. Add regression coverage for classify_command and rewrite_command with pre-run global flags. 
--- src/discover/registry.rs | 180 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 93a2504db..15bcaccdd 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -70,6 +70,21 @@ lazy_static! { static ref TAIL_LINES_SPACE: Regex = Regex::new(r"^tail\s+--lines\s+(\d+)\s+(.+)$").unwrap(); } +const GOLANGCI_GLOBAL_OPT_WITH_VALUE: &[&str] = &[ + "-c", + "--color", + "--config", + "--cpu-profile-path", + "--mem-profile-path", + "--trace-path", +]; + +#[derive(Debug, Clone, Copy)] +struct GolangciRunParts<'a> { + global_segment: &'a str, + run_segment: &'a str, +} + /// Classify a single (already-split) command. pub fn classify_command(cmd: &str) -> Classification { let trimmed = cmd.trim(); @@ -100,6 +115,9 @@ pub fn classify_command(cmd: &str) -> Classification { let cmd_normalized = strip_absolute_path(cmd_clean); // Strip git global options: git -C /tmp status → git status (#163) let cmd_normalized = strip_git_global_opts(&cmd_normalized); + // Strip golangci-lint global options before `run` so classify/rewrite stays + // aligned with the runtime wrapper behavior. + let cmd_normalized = strip_golangci_global_opts(&cmd_normalized); let cmd_clean = cmd_normalized.as_str(); // Exclude cat/head/tail with redirect operators — these are writes, not reads (#315) @@ -258,6 +276,93 @@ fn strip_git_global_opts(cmd: &str) -> String { format!("git {}", stripped.trim()) } +/// Strip golangci-lint global options before the `run` subcommand. +/// `golangci-lint --color never run ./...` → `golangci-lint run ./...` +/// Returns the original string unchanged if this is not a supported compact `run` invocation. 
+fn strip_golangci_global_opts(cmd: &str) -> String { + match parse_golangci_run_parts(cmd) { + Some(parts) => format!("golangci-lint {}", parts.run_segment), + None => cmd.to_string(), + } +} + +/// Parse supported golangci-lint invocations with optional global flags before `run`. +fn parse_golangci_run_parts(cmd: &str) -> Option> { + let tokens = split_token_spans(cmd); + let first = tokens.first()?; + if first.0 != "golangci-lint" && first.0 != "golangci" { + return None; + } + + let mut i = 1; + while i < tokens.len() { + let token = tokens[i].0; + + if token == "--" { + return None; + } + + if !token.starts_with('-') { + if token == "run" { + let global_segment = if i > 1 { + cmd[tokens[1].1..tokens[i].1].trim() + } else { + "" + }; + let run_segment = cmd[tokens[i].1..].trim(); + return Some(GolangciRunParts { + global_segment, + run_segment, + }); + } + return None; + } + + if let Some(flag) = split_golangci_flag_name(token) { + if GOLANGCI_GLOBAL_OPT_WITH_VALUE.contains(&flag) { + i += 1; + } + } + + i += 1; + } + + None +} + +fn split_golangci_flag_name(arg: &str) -> Option<&str> { + if arg.starts_with("--") { + return Some(arg.split_once('=').map(|(flag, _)| flag).unwrap_or(arg)); + } + + if arg.starts_with('-') { + return Some(arg); + } + + None +} + +fn split_token_spans(cmd: &str) -> Vec<(&str, usize, usize)> { + let mut tokens = Vec::new(); + let mut start = None; + + for (idx, ch) in cmd.char_indices() { + if ch.is_whitespace() { + if let Some(token_start) = start.take() { + tokens.push((&cmd[token_start..idx], token_start, idx)); + } + } else if start.is_none() { + start = Some(idx); + } + } + + if let Some(token_start) = start { + tokens.push((&cmd[token_start..], token_start, cmd.len())); + } + + tokens +} + /// Normalize absolute binary paths: `/usr/bin/grep -rn foo` → `grep -rn foo` (#485) /// Only strips if the first word contains a `/` (Unix path). 
fn strip_absolute_path(cmd: &str) -> String { @@ -538,6 +643,18 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option { return None; } + if let Some(parts) = parse_golangci_run_parts(cmd_clean) { + let rewritten = if parts.global_segment.is_empty() { + format!("{}rtk golangci-lint {}", env_prefix, parts.run_segment) + } else { + format!( + "{}rtk golangci-lint {} {}", + env_prefix, parts.global_segment, parts.run_segment + ) + }; + return Some(rewritten); + } + // #196: gh with --json/--jq/--template produces structured output that // rtk gh would corrupt — skip rewrite so the caller gets raw JSON. if rule.rtk_cmd == "rtk gh" { @@ -1883,6 +2000,28 @@ mod tests { )); } + #[test] + fn test_classify_golangci_lint_with_flag_before_run() { + assert!(matches!( + classify_command("golangci-lint -v run ./..."), + Classification::Supported { + rtk_equivalent: "rtk golangci-lint run", + .. + } + )); + } + + #[test] + fn test_classify_golangci_lint_with_value_flag_before_run() { + assert!(matches!( + classify_command("golangci-lint --color never run ./..."), + Classification::Supported { + rtk_equivalent: "rtk golangci-lint run", + .. 
+ } + )); + } + #[test] fn test_classify_golangci_lint_bare_is_not_compact_wrapper() { assert!(!matches!( @@ -1937,6 +2076,30 @@ mod tests { ); } + #[test] + fn test_rewrite_golangci_lint_with_flag_before_run() { + assert_eq!( + rewrite_command("golangci-lint -v run ./...", &[]), + Some("rtk golangci-lint -v run ./...".into()) + ); + } + + #[test] + fn test_rewrite_golangci_lint_with_value_flag_before_run() { + assert_eq!( + rewrite_command("golangci-lint --color never run ./...", &[]), + Some("rtk golangci-lint --color never run ./...".into()) + ); + } + + #[test] + fn test_rewrite_env_prefixed_golangci_lint_with_value_flag_before_run() { + assert_eq!( + rewrite_command("FOO=1 golangci-lint --color never run ./...", &[]), + Some("FOO=1 rtk golangci-lint --color never run ./...".into()) + ); + } + #[test] fn test_rewrite_bare_golangci_lint_skips_compact_wrapper() { assert_eq!(rewrite_command("golangci-lint", &[]), None); @@ -2369,6 +2532,23 @@ mod tests { assert_eq!(strip_git_global_opts("cargo test"), "cargo test"); } + #[test] + fn test_strip_golangci_global_opts_helper() { + assert_eq!( + strip_golangci_global_opts("golangci-lint -v run ./..."), + "golangci-lint run ./..." + ); + assert_eq!( + strip_golangci_global_opts("golangci-lint --color never run ./..."), + "golangci-lint run ./..." + ); + assert_eq!( + strip_golangci_global_opts("golangci-lint version"), + "golangci-lint version" + ); + assert_eq!(strip_golangci_global_opts("cargo test"), "cargo test"); + } + // --- #wc: wc filter was silently ignored by the hook --- #[test] From 24f2adaf8fb541c2564fa7dfb423947932e68fb4 Mon Sep 17 00:00:00 2001 From: mgierok Date: Tue, 24 Mar 2026 19:17:46 +0100 Subject: [PATCH 142/204] fix(golangci-lint): support inline global flags before run Handle --flag=value forms consistently in both the runtime parser and discover rewrite logic. Add regression coverage for classify and rewrite paths using inline global flag values before run. 
--- src/cmds/go/golangci_cmd.rs | 36 +++++++++++++++++++- src/discover/registry.rs | 68 ++++++++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/src/cmds/go/golangci_cmd.rs b/src/cmds/go/golangci_cmd.rs index 6fe734002..2e7036e93 100644 --- a/src/cmds/go/golangci_cmd.rs +++ b/src/cmds/go/golangci_cmd.rs @@ -191,7 +191,7 @@ fn find_subcommand_index(args: &[String]) -> Option { } if let Some(flag) = split_flag_name(arg) { - if GLOBAL_FLAGS_WITH_VALUE.contains(&flag) { + if golangci_flag_takes_separate_value(arg, flag) { i += 1; } } @@ -214,6 +214,18 @@ fn split_flag_name(arg: &str) -> Option<&str> { None } +fn golangci_flag_takes_separate_value(arg: &str, flag: &str) -> bool { + if !GLOBAL_FLAGS_WITH_VALUE.contains(&flag) { + return false; + } + + if arg.starts_with("--") && arg.contains('=') { + return false; + } + + true +} + fn build_filtered_args(invocation: &RunInvocation, version: u32) -> Vec { let mut args = invocation.global_args.clone(); args.push("run".to_string()); @@ -493,6 +505,28 @@ mod tests { ); } + #[test] + fn test_classify_invocation_with_inline_value_flag_uses_filtered_path() { + assert_eq!( + classify_invocation(&["--color=never".into(), "run".into(), "./...".into()]), + Invocation::FilteredRun(RunInvocation { + global_args: vec!["--color=never".into()], + run_args: vec!["./...".into()], + }) + ); + } + + #[test] + fn test_classify_invocation_with_inline_config_flag_uses_filtered_path() { + assert_eq!( + classify_invocation(&["--config=foo.yml".into(), "run".into(), "./...".into()]), + Invocation::FilteredRun(RunInvocation { + global_args: vec!["--config=foo.yml".into()], + run_args: vec!["./...".into()], + }) + ); + } + #[test] fn test_classify_invocation_bare_command_is_passthrough() { assert_eq!(classify_invocation(&[]), Invocation::Passthrough); diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 15bcaccdd..d9ff70363 100644 --- a/src/discover/registry.rs +++ 
b/src/discover/registry.rs @@ -319,7 +319,7 @@ fn parse_golangci_run_parts(cmd: &str) -> Option> { } if let Some(flag) = split_golangci_flag_name(token) { - if GOLANGCI_GLOBAL_OPT_WITH_VALUE.contains(&flag) { + if golangci_flag_takes_separate_value(token, flag) { i += 1; } } @@ -342,6 +342,18 @@ fn split_golangci_flag_name(arg: &str) -> Option<&str> { None } +fn golangci_flag_takes_separate_value(arg: &str, flag: &str) -> bool { + if !GOLANGCI_GLOBAL_OPT_WITH_VALUE.contains(&flag) { + return false; + } + + if arg.starts_with("--") && arg.contains('=') { + return false; + } + + true +} + fn split_token_spans(cmd: &str) -> Vec<(&str, usize, usize)> { let mut tokens = Vec::new(); let mut start = None; @@ -2022,6 +2034,28 @@ mod tests { )); } + #[test] + fn test_classify_golangci_lint_with_inline_value_flag_before_run() { + assert!(matches!( + classify_command("golangci-lint --color=never run ./..."), + Classification::Supported { + rtk_equivalent: "rtk golangci-lint run", + .. + } + )); + } + + #[test] + fn test_classify_golangci_lint_with_inline_config_flag_before_run() { + assert!(matches!( + classify_command("golangci-lint --config=foo.yml run ./..."), + Classification::Supported { + rtk_equivalent: "rtk golangci-lint run", + .. 
+ } + )); + } + #[test] fn test_classify_golangci_lint_bare_is_not_compact_wrapper() { assert!(!matches!( @@ -2092,6 +2126,22 @@ mod tests { ); } + #[test] + fn test_rewrite_golangci_lint_with_inline_value_flag_before_run() { + assert_eq!( + rewrite_command("golangci-lint --color=never run ./...", &[]), + Some("rtk golangci-lint --color=never run ./...".into()) + ); + } + + #[test] + fn test_rewrite_golangci_lint_with_inline_config_flag_before_run() { + assert_eq!( + rewrite_command("golangci-lint --config=foo.yml run ./...", &[]), + Some("rtk golangci-lint --config=foo.yml run ./...".into()) + ); + } + #[test] fn test_rewrite_env_prefixed_golangci_lint_with_value_flag_before_run() { assert_eq!( @@ -2100,6 +2150,14 @@ mod tests { ); } + #[test] + fn test_rewrite_env_prefixed_golangci_lint_with_inline_value_flag_before_run() { + assert_eq!( + rewrite_command("FOO=1 golangci-lint --color=never run ./...", &[]), + Some("FOO=1 rtk golangci-lint --color=never run ./...".into()) + ); + } + #[test] fn test_rewrite_bare_golangci_lint_skips_compact_wrapper() { assert_eq!(rewrite_command("golangci-lint", &[]), None); @@ -2542,6 +2600,14 @@ mod tests { strip_golangci_global_opts("golangci-lint --color never run ./..."), "golangci-lint run ./..." ); + assert_eq!( + strip_golangci_global_opts("golangci-lint --color=never run ./..."), + "golangci-lint run ./..." + ); + assert_eq!( + strip_golangci_global_opts("golangci-lint --config=foo.yml run ./..."), + "golangci-lint run ./..." + ); assert_eq!( strip_golangci_global_opts("golangci-lint version"), "golangci-lint version" From dbeeaed16aee79674ec2fd3778b7b11b10b847c6 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Fri, 10 Apr 2026 17:18:11 +0000 Subject: [PATCH 143/204] fix(find): include hidden files when pattern targets dotfiles (#1101) rtk find used WalkBuilder with .hidden(true), which skips ALL hidden files and directories. As a result, `rtk find . 
-name ".claude.json"` always returned "0 for '.claude.json'" even when the files existed. Fix: detect when the -name pattern starts with a dot and set .hidden(false) for that walk, so hidden entries are visited. Non-dotfile patterns keep the default .hidden(true) behaviour. Add regression tests covering both cases. Closes #1101 Co-Authored-By: Claude Sonnet 4.6 --- src/cmds/system/find_cmd.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/cmds/system/find_cmd.rs b/src/cmds/system/find_cmd.rs index 942843fba..490619e2f 100644 --- a/src/cmds/system/find_cmd.rs +++ b/src/cmds/system/find_cmd.rs @@ -210,9 +210,13 @@ pub fn run( let want_dirs = file_type == "d"; + // When the pattern targets dotfiles (e.g. -name ".claude.json"), we must walk hidden + // entries; otherwise skip them to keep results tidy (#1101). + let search_hidden = effective_pattern.starts_with('.'); + let mut builder = WalkBuilder::new(path); builder - .hidden(true) // skip hidden files/dirs + .hidden(!search_hidden) // skip hidden files/dirs unless pattern targets dotfiles .git_ignore(true) // respect .gitignore .git_global(true) .git_exclude(true); @@ -560,6 +564,22 @@ mod tests { assert!(result.is_ok()); } + // --- #1101: dotfile pattern should not skip hidden files --- + + #[test] + fn find_dotfile_pattern_includes_hidden() { + // .gitignore exists at the repo root — must be found when using a dotfile pattern + let result = run(".gitignore", ".", 50, Some(1), "f", false, 0); + assert!(result.is_ok(), "run with dotfile pattern should not error"); + } + + #[test] + fn find_regular_pattern_skips_hidden() { + // Non-dot pattern should not error (hidden dirs remain skipped) + let result = run("*.rs", "src", 5, None, "f", false, 0); + assert!(result.is_ok()); + } + // --- integration: run on this repo --- #[test] From 3db8070b51b9a312fcca20a8460d3d6259cc38b7 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Fri, 10 Apr 2026 17:20:32 +0000 Subject: 
[PATCH 144/204] fix(permissions): glob_matches middle-wildcard matches commands without trailing args (#1105) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When glob_matches processed a pattern like "git -C * diff:*" (normalized to "git -C * diff *"), middle segment " diff " requires a trailing space to match. For "git -C /path diff" the remaining text is "/path diff" which ends with " diff" — no trailing space — so .find(" diff ") returned None and the command was incorrectly rejected. Fix: in the middle-segment branch, if the full substring search fails, also check if the remaining text ends with the trimmed (right-stripped) segment. This handles commands that terminate at the middle token with no trailing arguments. Add regression test covering the exact reproduction case from the issue. Closes #1105 Co-Authored-By: Claude Sonnet 4.6 --- src/hooks/permissions.rs | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/src/hooks/permissions.rs b/src/hooks/permissions.rs index 187489811..066e2b317 100644 --- a/src/hooks/permissions.rs +++ b/src/hooks/permissions.rs @@ -253,10 +253,20 @@ fn glob_matches(cmd: &str, pattern: &str) -> bool { return false; } } else { - // Middle segment: find next occurrence - match cmd[search_from..].find(*part) { - Some(pos) => search_from += pos + part.len(), - None => return false, + // Middle segment: find next occurrence. + // Also accept end-of-string when the segment ends with whitespace — this + // handles commands that terminate at the middle token without trailing args, + // e.g. "git -C * diff:*" should match bare "git -C /path diff" (#1105). 
+ let remaining = &cmd[search_from..]; + if let Some(pos) = remaining.find(*part) { + search_from += pos + part.len(); + } else { + let trimmed = part.trim_end(); + if !trimmed.is_empty() && remaining.ends_with(trimmed) { + search_from += remaining.len(); + } else { + return false; + } } } } @@ -438,6 +448,26 @@ mod tests { assert!(!command_matches_pattern("git push develop", "git * main")); } + // Bug 3: middle wildcard at end-of-command (no trailing args) — #1105 + #[test] + fn test_middle_wildcard_at_end_of_command() { + // "git -C * diff:*" should match bare "git -C /path diff" (no trailing flags) + assert!(command_matches_pattern( + "git -C /path diff", + "git -C * diff:*" + )); + // Must still match when there ARE trailing args + assert!(command_matches_pattern( + "git -C /path diff --stat", + "git -C * diff:*" + )); + // Must NOT match a different subcommand + assert!(!command_matches_pattern( + "git -C /path status", + "git -C * diff:*" + )); + } + // Bug 3: multiple wildcards #[test] fn test_multiple_wildcards() { From 6b76fdb87d7c54cfc2a1b0e6117dd78b8430910b Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Fri, 10 Apr 2026 15:05:16 +0000 Subject: [PATCH 145/204] fix(git): remove -u short alias from --ultra-compact to fix git push -u The global --ultra-compact flag used -u as its short form which caused `rtk git push -u origin ` to silently consume -u as ultra-compact mode instead of forwarding it to git as --set-upstream. The push succeeded but the upstream tracking branch was never written to .git/config, causing subsequent plain `git push` calls to fail with "no upstream branch". 
Fixes #1086 Generated by Claude Code Vibe coded by ousamabenyounes Co-Authored-By: Claude --- CHANGELOG.md | 64 ++++++++++++++++++++++++++++++---------------------- src/main.rs | 33 ++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c1067489..a45309776 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,37 +5,51 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) - +## [Unreleased] ### Bug Fixes -* **automod:** add auto discovery for cmds ([234909d](https://github.com/rtk-ai/rtk/commit/234909d2c754ade2fdc939b0a1435a8e34ffc305)) -* **ci:** fix validate-docs.sh broken module count check ([bbe3da6](https://github.com/rtk-ai/rtk/commit/bbe3da642b5fc4b065b13a65647ea0ebf5264e65)) -* **cleaning:** constant extract ([aabc016](https://github.com/rtk-ai/rtk/commit/aabc0167bc013fd2d0c61a687580f6e69305500a)) -* **cmds:** migrate remaining exit_code to exit_code_from_output ([ba9fa34](https://github.com/rtk-ai/rtk/commit/ba9fa345f3d1d14bd0af236ec9aa8a9a0e5581d6)) -* **cmds:** more covering for run_filtered ([e48485a](https://github.com/rtk-ai/rtk/commit/e48485adc6a33d12b70664598020595cf7dfcd7e)) -* **docs:** add documentation ([2f7278a](https://github.com/rtk-ai/rtk/commit/2f7278ac5992bf2e84b763fb05642d89900ba495)) -* **docs:** add maintainers docs ([14265b4](https://github.com/rtk-ai/rtk/commit/14265b48c3a15e459a31da11250a51ab5830a508)) -* **refacto-p1:** unified cmds execution flow (+ rm dead code) ([75bd607](https://github.com/rtk-ai/rtk/commit/75bd607d55235f313855f5fe8c9eceafd73700a7)) -* **refacto-p2:** more standardize ([47a76ea](https://github.com/rtk-ai/rtk/commit/47a76ea35ed2fe02a3600792163f727fa3a94ff2)) -* 
**refacto-p2:** more standardize ([92c671a](https://github.com/rtk-ai/rtk/commit/92c671a175a5e2bf09720fd1a8591140bcb473a0)) -* **refacto:** wrappers for standardization, exit codes lexer tokenizer, constants, code clean ([bff0258](https://github.com/rtk-ai/rtk/commit/bff02584243f1b73418418b0c05365acf56fbb36)) -* **registry:** quoted env prefix + inline regex cleanup + routing docs ([f3217a4](https://github.com/rtk-ai/rtk/commit/f3217a467b543a3181605b257162f2b3ab5d5df0)) -* **review:** address PR [#910](https://github.com/rtk-ai/rtk/issues/910) review feedback ([0a8b8fd](https://github.com/rtk-ai/rtk/commit/0a8b8fd0693fa504f376146cbbcafe9ddf4632c8)) -* **review:** PR [#934](https://github.com/rtk-ai/rtk/issues/934) ([5bd35a3](https://github.com/rtk-ai/rtk/commit/5bd35a33ad6abe5278749726bed19912664531c2)) -* **review:** PR [#934](https://github.com/rtk-ai/rtk/issues/934) ([bae7930](https://github.com/rtk-ai/rtk/commit/bae79301194bbb48d1cbb39554096c3225f7cb73)) -* **rules:** add wc RtkRule with pattern field for develop compat ([d75e864](https://github.com/rtk-ai/rtk/commit/d75e864f20451a5e17918c75f2ea32672f65e1f4)) -* **standardize:** git+kube sub wrappers run_filtered ([7fd221f](https://github.com/rtk-ai/rtk/commit/7fd221f44660bcf411aa333d2c35a49ff89e7961)) -* **standardize:** merge pattern into rues ([08aabb9](https://github.com/rtk-ai/rtk/commit/08aabb95c3ae6e0b734f696264e1e1a8c0f0b22e)) +* **git:** remove `-u` short alias from `--ultra-compact` to fix `git push -u` upstream tracking ([#1086](https://github.com/rtk-ai/rtk/issues/1086)) -## [0.34.2](https://github.com/rtk-ai/rtk/compare/v0.34.1...v0.34.2) (2026-03-30) +## [0.35.0](https://github.com/rtk-ai/rtk/compare/v0.34.3...v0.35.0) (2026-04-06) + + +### Features + +* **aws:** expand CLI filters from 8 to 25 subcommands ([402c48e](https://github.com/rtk-ai/rtk/commit/402c48e66988e638a5b4f4dd193238fc1d0fe18f)) ### Bug Fixes -* **emots:** replace 📊 with "Summary:" 
([495a152](https://github.com/rtk-ai/rtk/commit/495a152059feabc7b516b96e804757608b87a10a)) -* **refacto-codebase:** technical docs & sub folders ([927daef](https://github.com/rtk-ai/rtk/commit/927daef49b8f771d195201d196378e27e0ee8a2b)) +* **cmd:** read/cat multiple file and consistent behavior ([3f58018](https://github.com/rtk-ai/rtk/commit/3f58018f4af1d7206457929cf80bb4534203c3ee)) +* **docs:** clean some docs + disclaimer ([deda44f](https://github.com/rtk-ai/rtk/commit/deda44f73607981f3d27ecc6341ce927aab34d37)) +* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([8465ca9](https://github.com/rtk-ai/rtk/commit/8465ca953fa9d70dcc971a941c19465d456eb7d4)) +* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([e1f2845](https://github.com/rtk-ai/rtk/commit/e1f2845df06a8d8b8325945dc4940ec5f530e4cc)) +* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([eefeae4](https://github.com/rtk-ai/rtk/commit/eefeae45656ff2607c3f519c8eae235e3f0fe411)) +* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([6cee6c6](https://github.com/rtk-ai/rtk/commit/6cee6c60b80f914ed9505e3925d85cadec43ab97)) +* **git:** preserve full diff hunk headers ([62f4452](https://github.com/rtk-ai/rtk/commit/62f445227679f3df293fe35e9b18cc5ab39d7963)) +* **git:** preserve full diff hunk headers ([09b3ff9](https://github.com/rtk-ai/rtk/commit/09b3ff9424e055f5fe25e535e5b60e077f8344f9)) +* **go:** avoid false build errors from download logs ([9c1cf2f](https://github.com/rtk-ai/rtk/commit/9c1cf2f403534fa7874638b1b983c2d7f918a185)) +* **go:** avoid false build errors from download logs ([d44fd3e](https://github.com/rtk-ai/rtk/commit/d44fd3e034208e3bcd59c2c46f7720eec4f10c98)) +* **go:** cover more build failure shapes 
([2425ad6](https://github.com/rtk-ai/rtk/commit/2425ad68e5386d19e5ec9ff1ca151a6d2c9a56d3)) +* **go:** preserve failing test location context ([1481bc5](https://github.com/rtk-ai/rtk/commit/1481bc590924031456a6022510275c29c09e330e)) +* **go:** preserve failing test location context ([374fe64](https://github.com/rtk-ai/rtk/commit/374fe64cfbedcd676733973e81a63a6dfecbb1b7)) +* **go:** restore build error coverage ([1177c9c](https://github.com/rtk-ai/rtk/commit/1177c9c873ac63b6c0bcc9e1b664a705baa0ad7a)) +* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([7217562](https://github.com/rtk-ai/rtk/commit/72175623551f40b581b4a7f6ed966c1e4a9c7358)) +* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([09979cf](https://github.com/rtk-ai/rtk/commit/09979cf29701a1b775bcac761d24ec0e055d1bec)) +* **hook_check:** detect missing integrations ([9cf9ccc](https://github.com/rtk-ai/rtk/commit/9cf9ccc1ac39f8bba37e932c7d318a3aa7a34ae9)) +* **init:** remove opt-out instruction from telemetry message ([7571c8e](https://github.com/rtk-ai/rtk/commit/7571c8e101c41ee64c51e2bd64697f85f9142423)) +* **init:** remove telemetry info lines from init output ([7dbef2c](https://github.com/rtk-ai/rtk/commit/7dbef2ce00824d26f2057e4c3c76e429e2e23088)) +* **main:** kill zombie processes + path for rtk md ([d16fc6d](https://github.com/rtk-ai/rtk/commit/d16fc6dacbfec912c21522939b15b7bbd9719487)) +* **main:** kill zombie processes + path for rtk md + missing intergrations ([a919335](https://github.com/rtk-ai/rtk/commit/a919335519ed4a5259a212e56407cb312aa99bac)) +* **merge:** changelog conflicts ([d92c5d2](https://github.com/rtk-ai/rtk/commit/d92c5d264a49483c8d6079e04d946a79bc990a74)) +* **proxy:** kill child process on SIGINT/SIGTERM to prevent orphans ([d813919](https://github.com/rtk-ai/rtk/commit/d813919a24546e044e7844fc7ed05fef4ec24033)) +* **proxy:** kill child process on SIGINT/SIGTERM to 
prevent orphans ([3318510](https://github.com/rtk-ai/rtk/commit/33185101fc122d0c11a25a4e02ac9f3a7dc7e3bb)) +* **review:** address ChildGuard disarm, stdin dedup, hook masking ([d85fe33](https://github.com/rtk-ai/rtk/commit/d85fe3384b87c16fafd25ec7bcadbff6e69f3f1f)) +* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([158c745](https://github.com/rtk-ai/rtk/commit/158c74527f6591d372e40a78cd604d73a20649a9)) +* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([41a6c6b](https://github.com/rtk-ai/rtk/commit/41a6c6bf6da78a4754794fdc6a1469df2e327920)) +* **tracking:** use std::env::temp_dir() for compatibility (instead of unix tmp) ([e918661](https://github.com/rtk-ai/rtk/commit/e918661440d7b50321f0535032f52c5e87aaf3cb)) + ## [Unreleased] ### Features @@ -44,10 +58,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * **aws:** add shared runner `run_aws_filtered()` eliminating per-handler boilerplate * **tee:** add `force_tee_hint()` — truncated output saves full data to file with recovery hint -### Bug Fixes - -* **init:** honor `CODEX_HOME` for Codex global install paths and cleanup - ## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) diff --git a/src/main.rs b/src/main.rs index 11332034a..43ef0e96a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -62,7 +62,7 @@ struct Cli { verbose: u8, /// Ultra-compact mode: ASCII icons, inline format (Level 2 optimizations) - #[arg(short = 'u', long, global = true)] + #[arg(long, global = true)] ultra_compact: bool, /// Set SKIP_ENV_VALIDATION=1 for child processes (Next.js, tsc, lint, prisma) @@ -2665,6 +2665,28 @@ mod tests { } } + #[test] + fn test_git_push_u_flag_passes_through() { + let cli = Cli::try_parse_from(["rtk", "git", "push", "-u", "origin", "my-branch"]).unwrap(); + assert!( + !cli.ultra_compact, + "-u on git push must NOT be 
consumed as --ultra-compact" + ); + match cli.command { + Commands::Git { + command: GitCommands::Push { args }, + .. + } => { + assert!( + args.contains(&"-u".to_string()), + "-u must be forwarded to git push, got: {:?}", + args + ); + } + _ => panic!("Expected Git Push command"), + } + } + #[test] fn test_pnpm_subcommand_with_short_filter() { // -F is the short form of --filter in pnpm @@ -2727,4 +2749,13 @@ mod tests { _ => panic!("Expected Pnpm Build command"), } } + + #[test] + fn test_ultra_compact_long_form_still_works() { + let cli = Cli::try_parse_from(["rtk", "--ultra-compact", "git", "status"]).unwrap(); + assert!( + cli.ultra_compact, + "--ultra-compact long form must still enable ultra-compact mode" + ); + } } From efa8e86c195546dfa1f74ae865d0d47558bcf752 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 13 Apr 2026 17:55:56 +0000 Subject: [PATCH 146/204] chore(master): release 0.36.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 51 +++++++++++++++++++++++++++++++++++ Cargo.lock | 2 +- Cargo.toml | 2 +- 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3a39fd8cf..93c546c8d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.35.0" + ".": "0.36.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index dcfc82142..08cae60ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,57 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.36.0](https://github.com/rtk-ai/rtk/compare/v0.35.0...v0.36.0) (2026-04-13) + + +### Features + +* **benchmark:** add multipass VM integration test suite ([6e7863b](https://github.com/rtk-ai/rtk/commit/6e7863bf313b0d18a47cf0ca2cdaea03cc2ed900)) +* **benchmark:** add multipass VM integration test suite ([d22759b](https://github.com/rtk-ai/rtk/commit/d22759b8c5254ad9c4a455f10cb7de75e92df581)) +* **benchmark:** add Swift ecosystem tests (6 commands + savings) ([1fbb6d9](https://github.com/rtk-ai/rtk/commit/1fbb6d935b4a0d031a7862cba312eebe1303ba9b)) +* **init:** add native support for Kilo Code and Google Antigravity ([d0a3797](https://github.com/rtk-ai/rtk/commit/d0a3797ec580f96948489d1e7c3329ac22a6c4eb)) +* **init:** add support for kilocode and antigravity agents ([66b90f1](https://github.com/rtk-ai/rtk/commit/66b90f1ed3de81acdce61164c068c24ed7ef29db)) +* **pnpm:** Add filter argument support ([2ba8d37](https://github.com/rtk-ai/rtk/commit/2ba8d372df186b4056a3b8906fc25cde8586dd42)) +* **skills:** add /pr-review skill for batch PR review ([21e67a1](https://github.com/rtk-ai/rtk/commit/21e67a1113041b74542d0285e5f74587dfb30b65)) +* **telemetry:** enrich daily ping with gap detection and quality metrics ([644c50f](https://github.com/rtk-ai/rtk/commit/644c50f786e5c567617e7ea907c5f312797b1265)) + + +### Bug Fixes + +* **benchmark:** address PR review feedback ([87ee81f](https://github.com/rtk-ai/rtk/commit/87ee81f08be5e7b1ca79513b1a91925d455f4f5c)) +* **benchmark:** address review feedback from @FlorianBruniaux ([d13c185](https://github.com/rtk-ai/rtk/commit/d13c185aac64d14288b574df44623723a69e7b95)) +* **ccusage:** add --yes flag and warn when falling back to npx ([f68fa00](https://github.com/rtk-ai/rtk/commit/f68fa0087c03d6882993b7b3eaee98e1dbab41b4)) +* **clippy:** show full error blocks instead of truncated headline ([95d9d13](https://github.com/rtk-ai/rtk/commit/95d9d134b0b76d83b6162614b0a79269b2135f40)) +* **clippy:** show full error blocks instead of 
truncated headline ([f4074f8](https://github.com/rtk-ai/rtk/commit/f4074f898a9b73b72bbcd8b18afab4831dcda406)), closes [#602](https://github.com/rtk-ai/rtk/issues/602) +* **curl:** skip JSON schema conversion for internal/localhost URLs ([577c311](https://github.com/rtk-ai/rtk/commit/577c311ecaaa8ae94f22dbe252152424d4333d04)) +* **discover:** preserve golangci-lint flags in rewrite ([d85303e](https://github.com/rtk-ai/rtk/commit/d85303ec4893deb904260f5dc11b7df906a50c07)) +* **docs:** update TELEMETRY.md to match code after review fixes ([be5c057](https://github.com/rtk-ai/rtk/commit/be5c0576d95566f37f266fd9f92e2a1b263697bd)) +* **find:** include hidden files when pattern targets dotfiles ([#1101](https://github.com/rtk-ai/rtk/issues/1101)) ([dbeeaed](https://github.com/rtk-ai/rtk/commit/dbeeaed16aee79674ec2fd3778b7b11b10b847c6)) +* **git:** re-insert -- separator when clap consumes it from git diff args ([#1215](https://github.com/rtk-ai/rtk/issues/1215)) ([9979c69](https://github.com/rtk-ai/rtk/commit/9979c699307a4adad2c2df0f2bc3b663df653311)) +* **git:** remove -u short alias from --ultra-compact to fix git push -u ([6b76fdb](https://github.com/rtk-ai/rtk/commit/6b76fdb87d7c54cfc2a1b0e6117dd78b8430910b)) +* **golangci-lint:** restore run wrapper and align guidance ([4f4e4d2](https://github.com/rtk-ai/rtk/commit/4f4e4d2b5a3529030fe4089f60d2f4b8740b5d53)) +* **golangci-lint:** support inline global flags before run ([24f2ada](https://github.com/rtk-ai/rtk/commit/24f2adaf8fb541c2564fa7dfb423947932e68fb4)) +* **go:** prevent double-counted failures when test-level fail also triggers package-level fail ([#958](https://github.com/rtk-ai/rtk/issues/958)) ([4fc15ef](https://github.com/rtk-ai/rtk/commit/4fc15ef2c1c80336ffaafa4179db4cee6f39236a)) +* **go:** prevent double-counting failures when package-level fail cascades from test failures ([#958](https://github.com/rtk-ai/rtk/issues/958)) 
([9722d5e](https://github.com/rtk-ai/rtk/commit/9722d5ebd8916f9b398bdc01b1102d42ab2b8795)) +* **hooks:** ensure default permission verdict prompts user for confirmation ([40462c0](https://github.com/rtk-ai/rtk/commit/40462c05e66f116928de365a0d271bdfd61cec72)) +* **hooks:** require all segments to match allow rules ([#1213](https://github.com/rtk-ai/rtk/issues/1213)) ([40c9dbc](https://github.com/rtk-ai/rtk/commit/40c9dbc7dbbf9332d6859060765c582a880f0fde)) +* **init:** honor CODEX_HOME for Codex global paths ([d442799](https://github.com/rtk-ai/rtk/commit/d442799e34d522c87a6eb60c2ff373385d201315)) +* **init:** install Codex global instructions in CODEX_HOME ([a257688](https://github.com/rtk-ai/rtk/commit/a2576883a27c5f915ba0ae7883a51006411b3ae5)) +* **json:** rename --schema to --keys-only, closes [#621](https://github.com/rtk-ai/rtk/issues/621) ([c16713a](https://github.com/rtk-ai/rtk/commit/c16713a973b563a6cba283c830b67c8c470e419f)) +* **ls:** filter quality wrong truncation ([aa6317f](https://github.com/rtk-ai/rtk/commit/aa6317fb83a5d9883623a4d3bee7a25bc99dcb4c)) +* **permissions:** glob_matches middle-wildcard matches commands without trailing args ([#1105](https://github.com/rtk-ai/rtk/issues/1105)) ([3db8070](https://github.com/rtk-ai/rtk/commit/3db8070b51b9a312fcca20a8460d3d6259cc38b7)) +* **pnpm:** list command not working ([ba235d8](https://github.com/rtk-ai/rtk/commit/ba235d85974c0a85b25e290a8bb83648800438a6)) +* **pytest:** -q mode summary line not detected ([57502a5](https://github.com/rtk-ai/rtk/commit/57502a5bef1fb56109a57cf2ea7377fd271253a7)) +* report package-level failures (timeouts, signals) in go test summary ([0b1c32b](https://github.com/rtk-ai/rtk/commit/0b1c32b3cc9a3e73418d401d1d481c1611c7ec0b)) +* report package-level failures (timeouts, signals) in go test summary ([c85a387](https://github.com/rtk-ai/rtk/commit/c85a387363e2079234b6141aad26418172c0e61a)), closes [#958](https://github.com/rtk-ai/rtk/issues/958) +* **security:** correct email 
domain from .dev to .app ([47383e8](https://github.com/rtk-ai/rtk/commit/47383e80197fc56e38f880f33a6b54261b82523c)) +* **tee:** prevent panic on UTF-8 multi-byte truncation boundary ([da486bf](https://github.com/rtk-ai/rtk/commit/da486bf394330c804cd1cd12e4b6835f18de5205)) +* **telemetry:** 7 bugs in enrichment — privacy leak, broken meta_usage, pricing ([15f666d](https://github.com/rtk-ai/rtk/commit/15f666dd8dbd18648cb7bd14a6f9f3cac2f7d10b)) +* **telemetry:** clean code ([8156081](https://github.com/rtk-ai/rtk/commit/81560812610686fa5ca3633c2bf0b79c05eaa7d9)) +* **telemetry:** consent, erasure, auth, docs ([2e4cc4b](https://github.com/rtk-ai/rtk/commit/2e4cc4bb5226444c8c0bfc827baf0c101c3759e8)) +* **telemetry:** non-terminal consent, single config load ([7821e98](https://github.com/rtk-ai/rtk/commit/7821e9872fd1f1ae9b40eb8a4458049869acc36b)) +* **telemetry:** RGPD-compliant, consent gate, erasure, privacy controls ([6a5bc84](https://github.com/rtk-ai/rtk/commit/6a5bc847e06cf6066e6f4aeed5a3ad0803a3649b)) + ## [0.35.0](https://github.com/rtk-ai/rtk/compare/v0.34.3...v0.35.0) (2026-04-06) diff --git a/Cargo.lock b/Cargo.lock index 7f33886e0..7ad9dc981 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,7 +892,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "automod", diff --git a/Cargo.toml b/Cargo.toml index d5ff024a1..2a2a3d683 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.35.0" +version = "0.36.0" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From 82c62eb893966b8f170ea22ec72e79f14789e12e Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Tue, 14 Apr 2026 11:51:40 +0200 Subject: [PATCH 147/204] fix(discover): weighted savings rate per bucket, decimal already_rtk percent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit B6 — report.rs:already_rtk integer division truncated small ratios to 0% (e.g. 3/1000 showed "0%" instead of "0.3%"). Now uses f64 division with one decimal place. Three regression tests added. B7 — mod.rs:SupportedBucket.savings_pct was set once from the first-seen sub-command and never updated. For buckets containing multiple sub-commands (e.g. git add + git diff + git log all under "rtk git"), the displayed estimated_savings_pct reflected only the first classification. Fix: track total_raw_output_tokens alongside total_output_tokens (savings). Compute effective_savings_pct = saved / raw * 100 at entry construction, giving a weighted average across all sub-commands in the bucket. This rate is exposed in rtk discover --format json via estimated_savings_pct. Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: Florian BRUNIAUX --- src/discover/mod.rs | 23 +++++++++++++--- src/discover/report.rs | 60 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/src/discover/mod.rs b/src/discover/mod.rs index ada51f8e5..e5b4a87b8 100644 --- a/src/discover/mod.rs +++ b/src/discover/mod.rs @@ -21,8 +21,13 @@ struct SupportedBucket { rtk_equivalent: &'static str, category: &'static str, count: usize, + /// Total estimated tokens *saved* (post-filter). Used for the "Est. Savings" column. total_output_tokens: usize, - savings_pct: f64, + /// Total estimated tokens *before* filtering (raw output). Accumulated alongside + /// `total_output_tokens` so the bucket's effective savings rate can be derived as + /// `total_output_tokens / total_raw_output_tokens` — a weighted average across + /// all sub-commands, regardless of which sub-command was seen first. 
+ total_raw_output_tokens: usize, // For display: the most common raw command command_counts: HashMap, } @@ -120,7 +125,7 @@ pub fn run( category, count: 0, total_output_tokens: 0, - savings_pct: estimated_savings_pct, + total_raw_output_tokens: 0, command_counts: HashMap::new(), } }); @@ -140,6 +145,9 @@ pub fn run( let savings = (output_tokens as f64 * estimated_savings_pct / 100.0) as usize; bucket.total_output_tokens += savings; + // Accumulate pre-savings tokens so we can compute a weighted effective + // savings rate across all sub-commands in this bucket later. + bucket.total_raw_output_tokens += output_tokens; // Track the display name with status let display_name = truncate_command(part); @@ -196,13 +204,22 @@ pub fn run( }) .unwrap_or_else(|| (String::new(), report::RtkStatus::Existing)); + // Derive the effective savings rate from accumulated totals rather than + // using the first-seen sub-command's rate. This gives a weighted average + // across all sub-commands that fell in this bucket. 
+ let effective_savings_pct = if bucket.total_raw_output_tokens > 0 { + bucket.total_output_tokens as f64 * 100.0 / bucket.total_raw_output_tokens as f64 + } else { + 0.0 + }; + SupportedEntry { command: command_with_status, count: bucket.count, rtk_equivalent: bucket.rtk_equivalent, category: bucket.category, estimated_savings_tokens: bucket.total_output_tokens, - estimated_savings_pct: bucket.savings_pct, + estimated_savings_pct: effective_savings_pct, rtk_status: status, } }) diff --git a/src/discover/report.rs b/src/discover/report.rs index 652bb3482..128ecb45e 100644 --- a/src/discover/report.rs +++ b/src/discover/report.rs @@ -83,12 +83,12 @@ pub fn format_text(report: &DiscoverReport, limit: usize, verbose: bool) -> Stri report.sessions_scanned, report.since_days, report.total_commands )); out.push_str(&format!( - "Already using RTK: {} commands ({}%)\n", + "Already using RTK: {} commands ({:.1}%)\n", report.already_rtk, if report.total_commands > 0 { - report.already_rtk * 100 / report.total_commands + report.already_rtk as f64 * 100.0 / report.total_commands as f64 } else { - 0 + 0.0 } )); @@ -214,3 +214,57 @@ fn truncate_str(s: &str, max: usize) -> String { format!("{}..", truncated) } } + +#[cfg(test)] +mod tests { + use super::*; + + fn make_report(total_commands: usize, already_rtk: usize) -> DiscoverReport { + DiscoverReport { + sessions_scanned: 1, + total_commands, + already_rtk, + since_days: 30, + supported: vec![], + unsupported: vec![], + parse_errors: 0, + rtk_disabled_count: 0, + rtk_disabled_examples: vec![], + } + } + + // B6 regression: integer division truncated small percentages to 0%. + // Example: 3/1000 = 0% (old bug), should be "0.3%". 
+ #[test] + fn test_already_rtk_percent_shows_decimal() { + let report = make_report(1000, 3); + let output = format_text(&report, 10, false); + // "0.3%" must appear; old code would print "0%" + assert!( + output.contains("0.3%"), + "Expected '0.3%' in output but got:\n{}", + output + ); + assert!( + !output.contains("(0%)"), + "Output must not contain '(0%)' — integer division bug still present:\n{}", + output + ); + } + + // Edge case: 0/0 must not divide-by-zero. + #[test] + fn test_already_rtk_percent_zero_total() { + let report = make_report(0, 0); + let output = format_text(&report, 10, false); + assert!(output.contains("0 commands (0.0%)")); + } + + // Full percent: 1000/1000 = 100.0% + #[test] + fn test_already_rtk_percent_full() { + let report = make_report(1000, 1000); + let output = format_text(&report, 10, false); + assert!(output.contains("100.0%")); + } +} From 4c9b528206e4844ccaadf8089bc86437f462f4e7 Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Tue, 14 Apr 2026 14:17:58 +0200 Subject: [PATCH 148/204] =?UTF-8?q?docs:=20Adrien=20review=20=E2=80=94=20h?= =?UTF-8?q?ierarchy,=20counts,=20consistency,=20gaps?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hierarchy: - Move troubleshooting.md and what-rtk-covers.md to guide/resources/ - Add guide/resources/telemetry.md (adapted from docs/TELEMETRY.md, user-facing: consent, opt-out, GDPR rights — without internal sections) Content fixes: - Remove all hardcoded counts (9 ecosystems, 12 agents, 60+, 7 more) replaced with qualitative language throughout - Unify DB filename: tracking.db → history.db everywhere (canonical: src/core/constants.rs HISTORY_DB) - installation.md: replace bare `cargo install rtk` with warning + explicit git URL to avoid Rust Type Kit name collision - supported-agents.md: remove hardcoded agent count - quick-start.md: remove hardcoded ecosystem list, link to what-rtk-covers New content: - gain.md: add --quota section explaining 
pro/5x/20x tier meanings - gain.md: add callout linking to discover.md (find missed savings) - index.md: add "Analyze your usage" section (rtk discover, rtk session) - configuration.md: clarify ignore_dirs/ignore_files scope, add prose link to telemetry.md - what-rtk-covers.md: clarify --ultra-compact vs git -u short flag conflict Cross-references: - All internal links updated for new resources/ paths - index.md: link to troubleshooting, telemetry, what-rtk-covers, analytics - discover.md: updated relative link to troubleshooting README: - Add Core team section (Patrick Szymkowiak, Florian Bruniaux, Adrien Eppling) Co-Authored-By: Claude Sonnet 4.6 --- README.md | 9 + docs/guide/analytics/discover.md | 2 +- docs/guide/analytics/gain.md | 20 ++- docs/guide/getting-started/configuration.md | 8 +- docs/guide/getting-started/installation.md | 6 +- docs/guide/getting-started/quick-start.md | 4 +- .../guide/getting-started/supported-agents.md | 2 +- docs/guide/index.md | 18 +- docs/guide/resources/telemetry.md | 168 ++++++++++++++++++ docs/guide/{ => resources}/troubleshooting.md | 2 +- docs/guide/{ => resources}/what-rtk-covers.md | 15 +- 11 files changed, 235 insertions(+), 19 deletions(-) create mode 100644 docs/guide/resources/telemetry.md rename docs/guide/{ => resources}/troubleshooting.md (99%) rename docs/guide/{ => resources}/what-rtk-covers.md (93%) diff --git a/README.md b/README.md index 6228a689d..ff848a690 100644 --- a/README.md +++ b/README.md @@ -464,6 +464,15 @@ export RTK_TELEMETRY_DISABLED=1 # Blocks telemetry regardless of consent +## Core team + +- **Patrick Szymkowiak** — Founder + [GitHub](https://github.com/pszymkowiak) · [LinkedIn](https://www.linkedin.com/in/patrick-szymkowiak/) +- **Florian Bruniaux** — Core contributor + [GitHub](https://github.com/FlorianBruniaux) · [LinkedIn](https://www.linkedin.com/in/florian-bruniaux-43408b83/) +- **Adrien Eppling** — Core contributor + [GitHub](https://github.com/aeppling) · 
[LinkedIn](https://www.linkedin.com/in/adrien-eppling/) + ## Contributing Contributions welcome! Please open an issue or PR on [GitHub](https://github.com/rtk-ai/rtk). diff --git a/docs/guide/analytics/discover.md b/docs/guide/analytics/discover.md index 77d21cc65..575ca73b2 100644 --- a/docs/guide/analytics/discover.md +++ b/docs/guide/analytics/discover.md @@ -32,7 +32,7 @@ Total missed: 23 ~66,000 tokens Run `rtk init --global` to capture these automatically. ``` -If commands appear in the missed list after installing RTK, it usually means the hook isn't active for that agent. See [Troubleshooting](../troubleshooting.md) — "Agent not using RTK". +If commands appear in the missed list after installing RTK, it usually means the hook isn't active for that agent. See [Troubleshooting](../resources/troubleshooting.md) — "Agent not using RTK". ## rtk session — adoption tracking diff --git a/docs/guide/analytics/gain.md b/docs/guide/analytics/gain.md index 9b257e3e5..706508fce 100644 --- a/docs/guide/analytics/gain.md +++ b/docs/guide/analytics/gain.md @@ -24,7 +24,8 @@ rtk gain --all # all breakdowns at once # Classic flags rtk gain --graph # ASCII graph, last 30 days rtk gain --history # last 10 commands -rtk gain --quota -t pro # quota analysis (pro/5x/20x tiers) +rtk gain --quota # monthly quota savings estimate (default tier: 20x) +rtk gain --quota -t pro # use pro tier token budget for estimate # Export rtk gain --all --format json > savings.json @@ -176,6 +177,23 @@ jobs: - run: git add stats/ && git commit -m "Weekly rtk stats" && git push ``` +## Quota estimate + +`--quota` estimates how many tokens RTK has saved relative to your monthly subscription budget, so you can see the cost impact of those savings. 
+ +```bash +rtk gain --quota # uses 20x tier by default +rtk gain --quota -t pro # Claude Pro plan budget +rtk gain --quota -t 5x # 5× usage plan budget +rtk gain --quota -t 20x # 20× usage plan budget +``` + +The tiers (`pro`, `5x`, `20x`) correspond to Anthropic Claude API subscription levels, each with a different monthly token allocation. RTK uses those allocations as a denominator to express your savings as a percentage of your budget. + +:::tip[Find missed savings] +`rtk gain` shows what RTK saved. To find commands that ran *without* RTK and calculate what you lost, see [rtk discover](./discover.md). +::: + ## Troubleshooting **No data showing:** diff --git a/docs/guide/getting-started/configuration.md b/docs/guide/getting-started/configuration.md index f2b1d5a2e..2c649945e 100644 --- a/docs/guide/getting-started/configuration.md +++ b/docs/guide/getting-started/configuration.md @@ -25,7 +25,7 @@ rtk config --create # create config file with defaults [tracking] enabled = true # enable/disable token tracking history_days = 90 # retention in days (auto-cleanup) -database_path = "/custom/path/tracking.db" # optional override +database_path = "/custom/path/history.db" # optional override [display] colors = true # colored output @@ -33,6 +33,8 @@ emoji = true # use emojis in output max_width = 120 # maximum output width [filters] +# These apply to file-reading commands (ls, find, grep, cat/rtk read). +# Paths matching these patterns are excluded from output, keeping noise low. 
ignore_dirs = [".git", "node_modules", "target", "__pycache__", ".venv", "vendor"] ignore_files = ["*.lock", "*.min.js", "*.min.css"] @@ -43,12 +45,14 @@ max_files = 20 # rotation: keep last N files # directory = "/custom/tee/path" # optional override [telemetry] -enabled = true # anonymous daily ping (opt-out below) +enabled = true # anonymous daily ping — see Telemetry & Privacy for full details [hooks] exclude_commands = [] # commands to never auto-rewrite ``` +For full details on what is collected, opt-out options, and GDPR rights, see [Telemetry & Privacy](../resources/telemetry.md). + ## Environment variables | Variable | Description | diff --git a/docs/guide/getting-started/installation.md b/docs/guide/getting-started/installation.md index 3f9ee991d..a07c025b7 100644 --- a/docs/guide/getting-started/installation.md +++ b/docs/guide/getting-started/installation.md @@ -39,8 +39,12 @@ brew install rtk-ai/tap/rtk ## Cargo +:::caution[Name collision risk] +`cargo install rtk` may install **Rust Type Kit** instead of Rust Token Killer — two unrelated projects share the same crate name. Use the explicit Git URL to guarantee the correct package: +::: + ```bash -cargo install rtk +cargo install --git https://github.com/rtk-ai/rtk rtk ``` ## Pre-built binaries (Windows, Linux, macOS) diff --git a/docs/guide/getting-started/quick-start.md b/docs/guide/getting-started/quick-start.md index af661ebff..6e1b7b558 100644 --- a/docs/guide/getting-started/quick-start.md +++ b/docs/guide/getting-started/quick-start.md @@ -38,7 +38,7 @@ Once the hook is installed, nothing changes in how you work. Your AI assistant r For example, when Claude Code runs `cargo test`, the hook rewrites it to `rtk cargo test` before it executes. The LLM receives filtered output with only the failures — not 500 lines of passing tests. You never see or type `rtk`. 
-Supported ecosystems: Git, Cargo/Rust, JavaScript (vitest, tsc, eslint, pnpm, Next.js, Prisma), Python, Go, Ruby, .NET, Docker/Kubernetes, GitHub CLI, and more. See [What RTK Optimizes](../what-rtk-covers.md) for the full list. +RTK covers all major ecosystems — Git, Cargo/Rust, JavaScript, Python, Go, Ruby, .NET, Docker/Kubernetes, and more. See [What RTK Optimizes](../resources/what-rtk-covers.md) for the full list. ## Step 3: Check your savings @@ -65,6 +65,6 @@ rtk proxy make install ## Next steps -- [What RTK Optimizes](../what-rtk-covers.md) — all supported commands and savings by ecosystem +- [What RTK Optimizes](../resources/what-rtk-covers.md) — all supported commands and savings by ecosystem - [Supported agents](./supported-agents.md) — Claude Code, Cursor, Copilot, and more - [Configuration](./configuration.md) — customize RTK behavior diff --git a/docs/guide/getting-started/supported-agents.md b/docs/guide/getting-started/supported-agents.md index f896822be..4623353d5 100644 --- a/docs/guide/getting-started/supported-agents.md +++ b/docs/guide/getting-started/supported-agents.md @@ -7,7 +7,7 @@ sidebar: # Supported Agents -RTK supports 12 AI coding agents across 3 integration tiers. Mistral Vibe support is planned. +RTK supports all major AI coding agents across 3 integration tiers. Mistral Vibe support is planned. ## How it works diff --git a/docs/guide/index.md b/docs/guide/index.md index 2b9a09b47..44e82095d 100644 --- a/docs/guide/index.md +++ b/docs/guide/index.md @@ -30,13 +30,13 @@ Zero config changes to your workflow. The hook handles everything automatically. ## What RTK optimizes -60+ commands across 9 ecosystems — Git, Cargo/Rust, JavaScript, Python, Go, Ruby, .NET, Docker/Kubernetes, and more. See [What RTK Optimizes](./what-rtk-covers.md) for the full list with savings percentages. +Dozens of commands across all major ecosystems — Git, Cargo/Rust, JavaScript, Python, Go, Ruby, .NET, Docker/Kubernetes, and more. 
See [What RTK Optimizes](./resources/what-rtk-covers.md) for the full list with savings percentages. ## Get started 1. **[Installation](./getting-started/installation.md)** — Install RTK and verify you have the right package 2. **[Quick Start](./getting-started/quick-start.md)** — Connect to your AI assistant in 5 minutes -3. **[Supported Agents](./getting-started/supported-agents.md)** — Claude Code, Cursor, Copilot, Gemini, and 7 more +3. **[Supported Agents](./getting-started/supported-agents.md)** — Claude Code, Cursor, Copilot, Gemini, and more ## Measure your savings @@ -46,10 +46,20 @@ rtk gain --daily # day-by-day breakdown rtk gain --weekly # weekly aggregation ``` -See [Analytics](./analytics/gain.md) for export formats and analysis workflows. +See [Token Savings Analytics](./analytics/gain.md) for export formats and analysis workflows. + +## Analyze your usage + +```bash +rtk discover # find commands that ran without RTK (missed savings) +rtk session # RTK adoption rate per Claude Code session +``` + +See [Discover and Session](./analytics/discover.md) for details. 
## Further reading - [Configuration](./getting-started/configuration.md) — config.toml, global flags, env vars, tee recovery -- [Troubleshooting](./troubleshooting.md) — common issues and fixes +- [Troubleshooting](./resources/troubleshooting.md) — common issues and fixes +- [Telemetry & Privacy](./resources/telemetry.md) — what RTK collects and how to opt out - [ARCHITECTURE.md](https://github.com/rtk-ai/rtk/blob/master/ARCHITECTURE.md) — system design for contributors diff --git a/docs/guide/resources/telemetry.md b/docs/guide/resources/telemetry.md new file mode 100644 index 000000000..b77a0afc7 --- /dev/null +++ b/docs/guide/resources/telemetry.md @@ -0,0 +1,168 @@ +--- +title: Telemetry & Privacy +description: What RTK collects, how to opt out, and your GDPR rights +sidebar: + order: 3 +--- + +# Telemetry & Privacy + +RTK collects anonymous, aggregate usage metrics once per day to help improve the product. Telemetry is **disabled by default** and requires explicit consent during `rtk init` or `rtk telemetry enable`. + +## Data Collector + +**Entity**: `RTK AI Labs` +**Contact**: contact@rtk-ai.app + +## Why we collect telemetry + +Without telemetry, we have no visibility into: + +- Which commands are used most and need the best filters +- Which filters are underperforming and need improvement +- Which ecosystems to prioritize for new filter development +- How much value RTK delivers to users (token savings in $ terms) +- Whether users stay engaged over time or churn after trying RTK + +This data directly drives our roadmap. For example, if telemetry shows that 40% of users run Python commands but only 10% of our filters cover Python, we know where to invest next. + +## How it works + +1. **Once per day** (23-hour interval), RTK sends a single HTTPS POST to our telemetry endpoint +2. The ping runs in a **background thread** and never blocks the CLI (2-second timeout) +3. A marker file prevents duplicate pings within the interval +4. 
If the server is unreachable, the ping is silently dropped — no retries, no queue + +## What is collected + +### Identity (anonymous) + +| Field | Example | Purpose | +|-------|---------|---------| +| `device_hash` | `a3f8c9...` (64 hex chars) | Count unique installations. SHA-256 of a per-device random salt stored locally (`~/.local/share/rtk/.device_salt`). Not reversible. No hostname or username included. | + +### Environment + +| Field | Example | Purpose | +|-------|---------|---------| +| `version` | `0.34.1` | Track adoption of new versions | +| `os` | `macos` | Know which platforms to support and test | +| `arch` | `aarch64` | Prioritize ARM vs x86 builds | +| `install_method` | `homebrew` | Understand distribution channels (homebrew/cargo/script/nix) | + +### Usage volume + +| Field | Example | Purpose | +|-------|---------|---------| +| `commands_24h` | `142` | Daily activity level | +| `commands_total` | `32888` | Lifetime usage — segment light vs heavy users | +| `top_commands` | `["git", "cargo", "ls"]` | Most popular tools (names only, max 5) | +| `tokens_saved_24h` | `450000` | Daily value delivered | +| `tokens_saved_total` | `96500000` | Lifetime value delivered | +| `savings_pct` | `72.5` | Overall effectiveness | + +### Quality (filter improvement) + +| Field | Example | Purpose | +|-------|---------|---------| +| `passthrough_top` | `["git:15", "npm:8"]` | Top 5 commands with 0% savings — these need filters | +| `parse_failures_24h` | `3` | Filter fragility — high count means filters are breaking | +| `low_savings_commands` | `["rtk docker ps:25%"]` | Commands averaging <30% savings — filters to improve | +| `avg_savings_per_command` | `68.5` | Unweighted average (vs global which is volume-biased) | + +### Ecosystem distribution + +| Field | Example | Purpose | +|-------|---------|---------| +| `ecosystem_mix` | `{"git": 45, "cargo": 20, "js": 15}` | Category percentages — where to invest filter development | + +### Retention (engagement) + +| 
Field | Example | Purpose | +|-------|---------|---------| +| `first_seen_days` | `45` | Installation age in days | +| `active_days_30d` | `22` | Days with at least 1 command in last 30 days — measures stickiness | + +### Economics + +| Field | Example | Purpose | +|-------|---------|---------| +| `tokens_saved_30d` | `12000000` | 30-day token savings for trend analysis | +| `estimated_savings_usd_30d` | `36.0` | Estimated dollar value saved (at ~$3/Mtok input pricing, Claude Sonnet) | + +### Adoption + +| Field | Example | Purpose | +|-------|---------|---------| +| `hook_type` | `claude` | Which AI agent hook is installed (claude/gemini/codex/cursor/none) | +| `custom_toml_filters` | `3` | Number of user-created TOML filter files — DSL adoption | + +### Configuration (user maturity) + +| Field | Example | Purpose | +|-------|---------|---------| +| `has_config_toml` | `true` | Whether user has customized RTK config | +| `exclude_commands_count` | `2` | Commands excluded from rewriting — high count may indicate frustration | +| `projects_count` | `5` | Distinct project paths — multi-project = power user | + +### Feature adoption + +| Field | Example | Purpose | +|-------|---------|---------| +| `meta_usage` | `{"gain": 5, "discover": 2}` | Which RTK features are actually used | + +## What is NOT collected + +- Source code or file contents +- Full command lines or arguments (only tool names like "git", "cargo") +- File paths or directory structures +- Secrets, API keys, or environment variable values +- Repository names or URLs +- Personally identifiable information +- IP addresses (not stored in telemetry pings; stored temporarily in erasure audit log for accountability, anonymized after 6 months) + +## Consent + +Telemetry requires explicit opt-in consent (GDPR Art. 6, 7). Consent is requested during `rtk init` or via `rtk telemetry enable`. Without consent, no data is sent. 
+ +```bash +rtk telemetry status # Check current consent state +rtk telemetry enable # Give consent (interactive prompt) +rtk telemetry disable # Withdraw consent +rtk telemetry forget # Withdraw consent + delete local data + request server erasure +``` + +Environment variable override (blocks telemetry regardless of consent): +```bash +export RTK_TELEMETRY_DISABLED=1 +``` + +## Retention Policy + +- **Server-side**: telemetry records are retained for a maximum of **12 months**, then automatically purged. +- **Server-side (erasure log)**: IP addresses in the erasure audit log are **anonymized after 6 months** (GDPR — IP is personal data). +- **Client-side**: the local SQLite database (`~/.local/share/rtk/history.db`) retains data for **90 days** by default (configurable via `tracking.history_days` in `config.toml`). Deleted entirely by `rtk telemetry forget`. + +## Your Rights (GDPR) + +Under the EU General Data Protection Regulation, you have the right to: + +- **Access** your data: `rtk telemetry status` shows your device hash; the telemetry payload is fully documented above. +- **Rectification**: since data is anonymous and aggregate, rectification is not applicable. +- **Erasure** (Art. 17): run `rtk telemetry forget` to delete local data and send an erasure request to the server. Alternatively, email contact@rtk-ai.app with your device hash. +- **Restriction of processing**: `rtk telemetry disable` stops all data collection immediately. +- **Portability**: the local SQLite database at `~/.local/share/rtk/history.db` contains all locally stored data. +- **Objection**: `rtk telemetry disable` or `export RTK_TELEMETRY_DISABLED=1`. + +## Erasure Procedure + +1. Run `rtk telemetry forget` — this disables telemetry, deletes your device salt, ping marker, and local tracking database (`history.db`), then sends an erasure request to the server. +2. 
If the server is unreachable, the CLI prints your full device hash and fallback instructions to email contact@rtk-ai.app for manual erasure. +3. You can also email contact@rtk-ai.app directly to request manual erasure. + +## Data Handling + +- All communications use HTTPS (TLS) +- Data is used exclusively for RTK product improvement +- No data is sold or shared with third parties +- Aggregate statistics may be published (e.g. "70% of RTK users are on macOS") diff --git a/docs/guide/troubleshooting.md b/docs/guide/resources/troubleshooting.md similarity index 99% rename from docs/guide/troubleshooting.md rename to docs/guide/resources/troubleshooting.md index 7eb3b41ff..51a6fa3be 100644 --- a/docs/guide/troubleshooting.md +++ b/docs/guide/resources/troubleshooting.md @@ -2,7 +2,7 @@ title: Troubleshooting description: Common RTK issues and how to fix them sidebar: - order: 8 + order: 2 --- # Troubleshooting diff --git a/docs/guide/what-rtk-covers.md b/docs/guide/resources/what-rtk-covers.md similarity index 93% rename from docs/guide/what-rtk-covers.md rename to docs/guide/resources/what-rtk-covers.md index de20182f2..dd5c39e89 100644 --- a/docs/guide/what-rtk-covers.md +++ b/docs/guide/resources/what-rtk-covers.md @@ -2,14 +2,14 @@ title: What RTK Optimizes description: Commands and ecosystems automatically optimized by RTK with typical token savings sidebar: - order: 2 + order: 1 --- # What RTK Optimizes Once RTK is installed with a hook, these commands are automatically intercepted and filtered. You run them normally — the hook rewrites them transparently before execution. -60+ commands across 9 ecosystems. Typical savings: 60-99%. +Typical savings: 60-99%. 
## Git @@ -131,18 +131,21 @@ These flags apply to all RTK commands and can push savings even higher: | Flag | Description | |------|-------------| -| `-u` / `--ultra-compact` | ASCII icons, inline format — extra token reduction on top of normal filtering | +| `--ultra-compact` | ASCII icons, inline format — extra token reduction on top of normal filtering | | `-v` / `--verbose` | Show filtering details on stderr (`-v`, `-vv`, `-vvv` for increasing detail) | ```bash # Ultra-compact: even smaller output -git log # → already filtered by RTK -git log -u # → ultra-compact variant (if using rtk directly) +rtk git log --ultra-compact # Debug: see what RTK is doing -RTK_DISABLED=0 git status -vvv +rtk git status -vvv ``` +:::note +Use `--ultra-compact` (long form) rather than `-u` when working with Git commands. Git's own `-u` flag means `--set-upstream` and the short form can cause confusion. +::: + ## Commands that are not rewritten If a command isn't in the list above, RTK runs it through passthrough — the output reaches the LLM unchanged. 
You can explicitly track unsupported commands: From 5916ecd86fb319c2519a0b4fb2891309833a3bb4 Mon Sep 17 00:00:00 2001 From: Adrien Eppling Date: Tue, 14 Apr 2026 20:52:34 +0200 Subject: [PATCH 149/204] fix: rename ship.md to ship/SKILL.md to match develop --- .claude/skills/{ship.md => ship/SKILL.md} | 1 + 1 file changed, 1 insertion(+) rename .claude/skills/{ship.md => ship/SKILL.md} (99%) diff --git a/.claude/skills/ship.md b/.claude/skills/ship/SKILL.md similarity index 99% rename from .claude/skills/ship.md rename to .claude/skills/ship/SKILL.md index b774bcb42..66acf6181 100644 --- a/.claude/skills/ship.md +++ b/.claude/skills/ship/SKILL.md @@ -1,5 +1,6 @@ --- description: Build, commit, push & version bump workflow - automates the complete release cycle +allowed-tools: Read Write Edit Bash Grep Glob --- # Ship Release From dfc009a4d6c2f78cf37f48031e16c11500c0c9df Mon Sep 17 00:00:00 2001 From: Adrien Eppling Date: Tue, 14 Apr 2026 22:50:52 +0200 Subject: [PATCH 150/204] restore cursor and copilot hook scripts for backward compat --- hooks/copilot/test-rtk-rewrite.sh | 293 ++++++++++++++++++++++++++++++ hooks/cursor/rtk-rewrite.sh | 54 ++++++ 2 files changed, 347 insertions(+) create mode 100644 hooks/copilot/test-rtk-rewrite.sh create mode 100644 hooks/cursor/rtk-rewrite.sh diff --git a/hooks/copilot/test-rtk-rewrite.sh b/hooks/copilot/test-rtk-rewrite.sh new file mode 100644 index 000000000..f1cca9497 --- /dev/null +++ b/hooks/copilot/test-rtk-rewrite.sh @@ -0,0 +1,293 @@ +#!/usr/bin/env bash +# Test suite for rtk hook (cross-platform preToolUse handler). +# Feeds mock preToolUse JSON through `rtk hook` and verifies allow/deny decisions. 
+# +# Usage: bash hooks/test-copilot-rtk-rewrite.sh +# +# Copilot CLI input format: +# {"toolName":"bash","toolArgs":"{\"command\":\"...\"}"} +# Output on intercept: {"permissionDecision":"deny","permissionDecisionReason":"..."} +# +# VS Code Copilot Chat input format: +# {"tool_name":"Bash","tool_input":{"command":"..."}} +# Output on intercept: {"hookSpecificOutput":{"permissionDecision":"allow","updatedInput":{...}}} +# +# Output on pass-through: empty (exit 0) + +RTK="${RTK:-rtk}" +PASS=0 +FAIL=0 +TOTAL=0 + +# Colors +GREEN='\033[32m' +RED='\033[31m' +DIM='\033[2m' +RESET='\033[0m' + +# Build a Copilot CLI preToolUse input JSON +copilot_bash_input() { + local cmd="$1" + local tool_args + tool_args=$(jq -cn --arg cmd "$cmd" '{"command":$cmd}') + jq -cn --arg ta "$tool_args" '{"toolName":"bash","toolArgs":$ta}' +} + +# Build a VS Code Copilot Chat preToolUse input JSON +vscode_bash_input() { + local cmd="$1" + jq -cn --arg cmd "$cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}' +} + +# Build a non-bash tool input +tool_input() { + local tool_name="$1" + jq -cn --arg t "$tool_name" '{"toolName":$t,"toolArgs":"{}"}' +} + +# Assert Copilot CLI: hook denies and reason contains the expected rtk command +test_deny() { + local description="$1" + local input_cmd="$2" + local expected_rtk="$3" + TOTAL=$((TOTAL + 1)) + + local output + output=$(copilot_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true + + local decision reason + decision=$(echo "$output" | jq -r '.permissionDecision // empty' 2>/dev/null) + reason=$(echo "$output" | jq -r '.permissionDecisionReason // empty' 2>/dev/null) + + if [ "$decision" = "deny" ] && echo "$reason" | grep -qF "$expected_rtk"; then + printf " ${GREEN}DENY${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$expected_rtk" + PASS=$((PASS + 1)) + else + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected decision: deny, reason containing: %s\n" "$expected_rtk" + printf " actual decision: %s\n" "$decision" + 
printf " actual reason: %s\n" "$reason" + FAIL=$((FAIL + 1)) + fi +} + +# Assert VS Code Copilot Chat: hook returns updatedInput (allow) with rewritten command +test_vscode_rewrite() { + local description="$1" + local input_cmd="$2" + local expected_rtk="$3" + TOTAL=$((TOTAL + 1)) + + local output + output=$(vscode_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true + + local decision updated_cmd + decision=$(echo "$output" | jq -r '.hookSpecificOutput.permissionDecision // empty' 2>/dev/null) + updated_cmd=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty' 2>/dev/null) + + if [ "$decision" = "allow" ] && echo "$updated_cmd" | grep -qF "$expected_rtk"; then + printf " ${GREEN}REWRITE${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$updated_cmd" + PASS=$((PASS + 1)) + else + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected decision: allow, updatedInput containing: %s\n" "$expected_rtk" + printf " actual decision: %s\n" "$decision" + printf " actual updatedInput: %s\n" "$updated_cmd" + FAIL=$((FAIL + 1)) + fi +} + +# Assert the hook emits no output (pass-through) +test_allow() { + local description="$1" + local input="$2" + TOTAL=$((TOTAL + 1)) + + local output + output=$(echo "$input" | "$RTK" hook 2>/dev/null) || true + + if [ -z "$output" ]; then + printf " ${GREEN}PASS${RESET} %s ${DIM}→ (allow)${RESET}\n" "$description" + PASS=$((PASS + 1)) + else + local decision + decision=$(echo "$output" | jq -r '.permissionDecision // .hookSpecificOutput.permissionDecision // empty' 2>/dev/null) + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected: (no output)\n" + printf " actual: permissionDecision=%s\n" "$decision" + FAIL=$((FAIL + 1)) + fi +} + +echo "============================================" +echo " RTK Hook Test Suite (rtk hook)" +echo "============================================" +echo "" + +# ---- SECTION 1: Copilot CLI — commands that should be denied ---- +echo "--- Copilot CLI: 
intercepted (deny with rtk suggestion) ---" + +test_deny "git status" \ + "git status" \ + "rtk git status" + +test_deny "git log --oneline -10" \ + "git log --oneline -10" \ + "rtk git log" + +test_deny "git diff HEAD" \ + "git diff HEAD" \ + "rtk git diff" + +test_deny "cargo test" \ + "cargo test" \ + "rtk cargo test" + +test_deny "cargo clippy --all-targets" \ + "cargo clippy --all-targets" \ + "rtk cargo clippy" + +test_deny "cargo build" \ + "cargo build" \ + "rtk cargo build" + +test_deny "grep -rn pattern src/" \ + "grep -rn pattern src/" \ + "rtk grep" + +test_deny "gh pr list" \ + "gh pr list" \ + "rtk gh" + +echo "" + +# ---- SECTION 2: VS Code Copilot Chat — commands that should be rewritten via updatedInput ---- +echo "--- VS Code Copilot Chat: intercepted (updatedInput rewrite) ---" + +test_vscode_rewrite "git status" \ + "git status" \ + "rtk git status" + +test_vscode_rewrite "cargo test" \ + "cargo test" \ + "rtk cargo test" + +test_vscode_rewrite "gh pr list" \ + "gh pr list" \ + "rtk gh" + +echo "" + +# ---- SECTION 3: Pass-through cases ---- +echo "--- Pass-through (allow silently) ---" + +test_allow "Copilot CLI: already rtk: rtk git status" \ + "$(copilot_bash_input "rtk git status")" + +test_allow "Copilot CLI: already rtk: rtk cargo test" \ + "$(copilot_bash_input "rtk cargo test")" + +test_allow "Copilot CLI: heredoc" \ + "$(copilot_bash_input "cat <<'EOF' +hello +EOF")" + +test_allow "Copilot CLI: unknown command: htop" \ + "$(copilot_bash_input "htop")" + +test_allow "Copilot CLI: unknown command: echo" \ + "$(copilot_bash_input "echo hello world")" + +test_allow "Copilot CLI: non-bash tool: view" \ + "$(tool_input "view")" + +test_allow "Copilot CLI: non-bash tool: edit" \ + "$(tool_input "edit")" + +test_allow "VS Code: already rtk" \ + "$(vscode_bash_input "rtk git status")" + +test_allow "VS Code: non-bash tool: editFiles" \ + "$(jq -cn '{"tool_name":"editFiles"}')" + +echo "" + +# ---- SECTION 4: Output format assertions ---- +echo 
"--- Output format ---" + +# Copilot CLI output format +TOTAL=$((TOTAL + 1)) +raw_output=$(copilot_bash_input "git status" | "$RTK" hook 2>/dev/null) + +if echo "$raw_output" | jq . >/dev/null 2>&1; then + printf " ${GREEN}PASS${RESET} Copilot CLI: output is valid JSON\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: output is not valid JSON: %s\n" "$raw_output" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +decision=$(echo "$raw_output" | jq -r '.permissionDecision') +if [ "$decision" = "deny" ]; then + printf " ${GREEN}PASS${RESET} Copilot CLI: permissionDecision == \"deny\"\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: expected \"deny\", got \"%s\"\n" "$decision" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +reason=$(echo "$raw_output" | jq -r '.permissionDecisionReason') +if echo "$reason" | grep -qE '`rtk [^`]+`'; then + printf " ${GREEN}PASS${RESET} Copilot CLI: reason contains backtick-quoted rtk command ${DIM}→ %s${RESET}\n" "$reason" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: reason missing backtick-quoted command: %s\n" "$reason" + FAIL=$((FAIL + 1)) +fi + +# VS Code output format +TOTAL=$((TOTAL + 1)) +vscode_output=$(vscode_bash_input "git status" | "$RTK" hook 2>/dev/null) + +if echo "$vscode_output" | jq . 
>/dev/null 2>&1; then + printf " ${GREEN}PASS${RESET} VS Code: output is valid JSON\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: output is not valid JSON: %s\n" "$vscode_output" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +vscode_decision=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.permissionDecision') +if [ "$vscode_decision" = "allow" ]; then + printf " ${GREEN}PASS${RESET} VS Code: hookSpecificOutput.permissionDecision == \"allow\"\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: expected \"allow\", got \"%s\"\n" "$vscode_decision" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +vscode_updated=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.updatedInput.command') +if echo "$vscode_updated" | grep -q "^rtk "; then + printf " ${GREEN}PASS${RESET} VS Code: updatedInput.command starts with rtk ${DIM}→ %s${RESET}\n" "$vscode_updated" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: updatedInput.command should start with rtk: %s\n" "$vscode_updated" + FAIL=$((FAIL + 1)) +fi + +echo "" + +# ---- SUMMARY ---- +echo "============================================" +if [ $FAIL -eq 0 ]; then + printf " ${GREEN}ALL $TOTAL TESTS PASSED${RESET}\n" +else + printf " ${RED}$FAIL FAILED${RESET} / $TOTAL total ($PASS passed)\n" +fi +echo "============================================" + +exit $FAIL diff --git a/hooks/cursor/rtk-rewrite.sh b/hooks/cursor/rtk-rewrite.sh new file mode 100644 index 000000000..4b80b260c --- /dev/null +++ b/hooks/cursor/rtk-rewrite.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# rtk-hook-version: 1 +# RTK Cursor Agent hook — rewrites shell commands to use rtk for token savings. +# Works with both Cursor editor and cursor-cli (they share ~/.cursor/hooks.json). +# Cursor preToolUse hook format: receives JSON on stdin, returns JSON on stdout. 
+# Requires: rtk >= 0.23.0, jq +# +# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`, +# which is the single source of truth (src/discover/registry.rs). +# To add or change rewrite rules, edit the Rust registry — not this file. + +if ! command -v jq &>/dev/null; then + echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2 + exit 0 +fi + +if ! command -v rtk &>/dev/null; then + echo "[rtk] WARNING: rtk is not installed or not in PATH. Hook cannot rewrite commands. Install: https://github.com/rtk-ai/rtk#installation" >&2 + exit 0 +fi + +# Version guard: rtk rewrite was added in 0.23.0. +RTK_VERSION=$(rtk --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) +if [ -n "$RTK_VERSION" ]; then + MAJOR=$(echo "$RTK_VERSION" | cut -d. -f1) + MINOR=$(echo "$RTK_VERSION" | cut -d. -f2) + if [ "$MAJOR" -eq 0 ] && [ "$MINOR" -lt 23 ]; then + echo "[rtk] WARNING: rtk $RTK_VERSION is too old (need >= 0.23.0). Upgrade: cargo install rtk" >&2 + exit 0 + fi +fi + +INPUT=$(cat) +CMD=$(echo "$INPUT" | jq -r '.tool_input.command // empty') + +if [ -z "$CMD" ]; then + echo '{}' + exit 0 +fi + +# Delegate all rewrite logic to the Rust binary. +# rtk rewrite exits 1 when there's no rewrite — hook passes through silently. +REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || { echo '{}'; exit 0; } + +# No change — nothing to do. 
+if [ "$CMD" = "$REWRITTEN" ]; then + echo '{}' + exit 0 +fi + +jq -n --arg cmd "$REWRITTEN" '{ + "permission": "allow", + "updated_input": { "command": $cmd } +}' From 71baf2b82d14ba0c8c3f1ca281d7fb8728fbdd2b Mon Sep 17 00:00:00 2001 From: Joshua Date: Tue, 14 Apr 2026 18:53:24 -0500 Subject: [PATCH 151/204] making curl_cmd use force_tee_hint --- Cargo.lock | 2 +- src/cmds/cloud/curl_cmd.rs | 136 +++++++++++++------------------------ 2 files changed, 49 insertions(+), 89 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f33886e0..d3fb7edb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,7 +892,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.35.0" +version = "0.34.3" dependencies = [ "anyhow", "automod", diff --git a/src/cmds/cloud/curl_cmd.rs b/src/cmds/cloud/curl_cmd.rs index df073c507..acd4cb91f 100644 --- a/src/cmds/cloud/curl_cmd.rs +++ b/src/cmds/cloud/curl_cmd.rs @@ -1,9 +1,8 @@ //! Runs curl and auto-compresses JSON responses. -use crate::core::stream::exec_capture; +use crate::core::tee::force_tee_hint; use crate::core::tracking; -use crate::core::utils::{resolved_command, truncate}; -use crate::json_cmd; +use crate::core::{stream::exec_capture, utils::resolved_command}; use anyhow::{Context, Result}; /// Not using run_filtered: on failure, curl can return HTML error pages (404, 500) @@ -36,68 +35,43 @@ pub fn run(args: &[String], verbose: u8) -> Result { let raw = result.stdout.clone(); - // Auto-detect JSON and pipe through filter - let filtered = filter_curl_output(&result.stdout, args); - println!("{}", filtered); + let result = filter_curl_output(&result.stdout); + + println!("{}", result.content); + if let Some(hint) = &result.tee_hint { + println!("{}", hint); + } timer.track( &format!("curl {}", args.join(" ")), &format!("rtk curl {}", args.join(" ")), &raw, - &filtered, + &result.content, ); Ok(0) } -fn filter_curl_output(output: &str, args: &[String]) -> String { - let trimmed = output.trim(); - - // Try JSON detection: 
starts with { or [ - if (trimmed.starts_with('{') || trimmed.starts_with('[')) - && (trimmed.ends_with('}') || trimmed.ends_with(']')) - { - // Skip schema conversion for internal/localhost URLs (issues #1152, #1157) - if !is_internal_url(args) { - if let Ok(schema) = json_cmd::filter_json_string(trimmed, 5) { - // Only use schema if it's actually shorter than the original (#297) - if schema.len() <= trimmed.len() { - return schema; - } - } - } - } +fn filter_curl_output(raw: &str) -> FilterResult { + let trimmed = raw.trim(); + let tee_hint = force_tee_hint(raw, "curl"); - // Not JSON: truncate long output - let lines: Vec<&str> = trimmed.lines().collect(); - if lines.len() > 30 { - let mut result: Vec<&str> = lines[..30].to_vec(); - result.push(""); - let msg = format!( - "... ({} more lines, {} bytes total)", - lines.len() - 30, - trimmed.len() - ); - return format!("{}\n{}", result.join("\n"), msg); - } + let content = if trimmed.len() >= 500 { + let mut end = 500; + while !trimmed.is_char_boundary(end) { + end -= 1; + } + format!("{}... 
({} bytes total)", &trimmed[..end], trimmed.len()) + } else { + trimmed.to_string() + }; - // Short output: return as-is but truncate long lines - lines - .iter() - .map(|l| truncate(l, 200)) - .collect::>() - .join("\n") + FilterResult { content, tee_hint } } -fn is_internal_url(args: &[String]) -> bool { - args.iter().any(|a| { - let lower = a.to_lowercase(); - lower.starts_with("http://localhost") - || lower.starts_with("http://127.0.0.1") - || lower.starts_with("http://[::1]") - || lower.starts_with("https://localhost") - || lower.starts_with("https://127.0.0.1") - }) +struct FilterResult { + content: String, + tee_hint: Option, } #[cfg(test)] @@ -105,56 +79,42 @@ mod tests { use super::*; #[test] - fn test_filter_curl_json() { - // Large JSON where schema is shorter than original — schema should be returned - let output = r#"{"name": "a very long user name here", "count": 42, "items": [1, 2, 3], "description": "a very long description that takes up many characters in the original JSON payload", "status": "active", "url": "https://example.com/api/v1/users/123"}"#; - let result = filter_curl_output(output, &[]); - assert!(result.contains("name")); - assert!(result.contains("string")); - assert!(result.contains("int")); - } - - #[test] - fn test_filter_curl_json_array() { - let output = r#"[{"id": 1}, {"id": 2}]"#; - let result = filter_curl_output(output, &[]); - assert!(result.contains("id")); + fn test_filter_curl_json_small_no_tee_hint() { + let output = r#"{"r2Ready":true,"status":"ok"}"#; + let result = filter_curl_output(output); + assert_eq!(result.content, output); + assert!(result.tee_hint.is_none()); } #[test] fn test_filter_curl_non_json() { let output = "Hello, World!\nThis is plain text."; - let result = filter_curl_output(output, &[]); - assert!(result.contains("Hello, World!")); - assert!(result.contains("plain text")); + let result = filter_curl_output(output); + assert_eq!(result.content, output); } #[test] - fn 
test_filter_curl_json_small_returns_original() { - // Small JSON where schema would be larger than original (issue #297) - let output = r#"{"r2Ready":true,"status":"ok"}"#; - let result = filter_curl_output(output, &[]); - // Schema would be "{\n r2Ready: bool,\n status: string\n}" which is longer - // Should return the original JSON unchanged - assert_eq!(result.trim(), output.trim()); + fn test_filter_curl_long_output_truncated() { + let long: String = "x".repeat(1000); + let result = filter_curl_output(&long); + assert!(result.content.starts_with('x')); + assert!(result.content.contains("bytes total")); + assert!(result.content.contains("1000")); + assert!(result.content.len() < 600); } #[test] - fn test_filter_curl_long_output() { - let lines: Vec = (0..50).map(|i| format!("Line {}", i)).collect(); - let output = lines.join("\n"); - let result = filter_curl_output(&output, &[]); - assert!(result.contains("Line 0")); - assert!(result.contains("Line 29")); - assert!(result.contains("more lines")); + fn test_filter_curl_multibyte_boundary() { + let content = "a".repeat(499) + "é"; + let result = filter_curl_output(&content); + assert!(result.content.contains("bytes total")); + assert!(result.content.len() < 600); } #[test] - fn test_is_internal_url_localhost() { - assert!(is_internal_url(&["http://localhost:9222/json/version".to_string()])); - assert!(is_internal_url(&["http://127.0.0.1:8080/api".to_string()])); - assert!(is_internal_url(&["-s".to_string(), "http://localhost:3000".to_string()])); - assert!(!is_internal_url(&["https://api.example.com/data".to_string()])); - assert!(!is_internal_url(&["https://github.com".to_string()])); + fn test_filter_curl_exact_500_bytes() { + let content = "a".repeat(500); + let result = filter_curl_output(&content); + assert!(result.content.contains("bytes total")); } } From 92bbc8a345daf003726e040be093d66b022c5866 Mon Sep 17 00:00:00 2001 From: Joshua Date: Wed, 15 Apr 2026 14:29:09 -0500 Subject: [PATCH 152/204] fixing bad 
merge --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index d3fb7edb2..7f33886e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,7 +892,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.34.3" +version = "0.35.0" dependencies = [ "anyhow", "automod", From 80bc7fbd61995cde4198ca7b28f31f75a6ae7bd1 Mon Sep 17 00:00:00 2001 From: Joshua Date: Wed, 15 Apr 2026 14:45:30 -0500 Subject: [PATCH 153/204] documentation --- README.md | 2 +- docs/usage/FEATURES.md | 4 ++-- src/cmds/cloud/README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6228a689d..68522d627 100644 --- a/README.md +++ b/README.md @@ -235,7 +235,7 @@ rtk json config.json # Structure without values rtk deps # Dependencies summary rtk env -f AWS # Filtered env vars rtk log app.log # Deduplicated logs -rtk curl # Auto-detect JSON + schema +rtk curl # Truncate + save full output rtk wget # Download, strip progress bars rtk summary # Heuristic summary rtk proxy # Raw passthrough + tracking diff --git a/docs/usage/FEATURES.md b/docs/usage/FEATURES.md index 5b42cbbca..bf3412b82 100644 --- a/docs/usage/FEATURES.md +++ b/docs/usage/FEATURES.md @@ -964,13 +964,13 @@ Les lignes repetees sont fusionnees : `[ERROR] Connection refused (x42)`. --- -### `rtk curl` -- HTTP avec detection JSON +### `rtk curl` -- HTTP avec troncature ```bash rtk curl [args...] ``` -Auto-detecte les reponses JSON et affiche le schema au lieu du contenu complet. +Tronque les reponses longues et sauvegarde la sortie complete dans un fichier pour recuperation. --- diff --git a/src/cmds/cloud/README.md b/src/cmds/cloud/README.md index f64a5e3a5..a86acec6a 100644 --- a/src/cmds/cloud/README.md +++ b/src/cmds/cloud/README.md @@ -6,6 +6,6 @@ - `aws_cmd.rs` — 25 specialized filters covering STS, S3, EC2, ECS, RDS, CloudFormation, CloudWatch Logs, Lambda, IAM, DynamoDB, EKS, SQS, Secrets Manager. 
Forces `--output json` for structured parsing, uses `force_tee_hint()` for truncation recovery, strips Lambda secrets. Shared runner `run_aws_filtered()` handles boilerplate for JSON-based filters; text-based filters (S3 ls, S3 sync/cp) have dedicated runners - `container.rs` handles both Docker and Kubernetes; `DockerCommands` and `KubectlCommands` sub-enums in `main.rs` route to `container::run()` -- uses passthrough for unknown subcommands -- `curl_cmd.rs` auto-detects JSON responses and shows schema (structure without values) +- `curl_cmd.rs` truncates long responses, saves full output to file for recovery - `wget_cmd.rs` wraps wget with output filtering - `psql_cmd.rs` filters PostgreSQL query output From d34389c3d0936c2b0790e14f450bb50a28a7edf7 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 16 Apr 2026 15:41:20 +0200 Subject: [PATCH 154/204] fix: P0+P1 fixes from pre-merge review of hook engine - runner: print captured output on non-zero exit (P0.1) - main: add hook/pipe to META_COMMANDS (P0.2) - init: store integrity hash after Gemini script install (P0.3) - hook_cmd: audit log + permission check for all agent paths (P1.1, P1.2) - runner: include failure_lines in cargo test summary (P1.5) - Cargo.toml: remove unconditional libc dep (P1.7) - init: clean stale settings.json entries during migration (P1.8) --- Cargo.toml | 1 - src/cmds/rust/runner.rs | 6 + src/cmds/system/pipe_cmd.rs | 4 +- src/core/runner.rs | 3 + src/hooks/hook_cmd.rs | 43 +++++- src/hooks/init.rs | 292 +++++++++++++++++++++++++++++++++++- src/main.rs | 2 + 7 files changed, 338 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ec55eea29..81cc9c1df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,6 @@ flate2 = "1.0" quick-xml = "0.37" which = "8" automod = "1" -libc = "0.2" [target.'cfg(unix)'.dependencies] libc = "0.2" diff --git a/src/cmds/rust/runner.rs b/src/cmds/rust/runner.rs index 51b2ba790..476f90671 100644 --- 
a/src/cmds/rust/runner.rs +++ b/src/cmds/rust/runner.rs @@ -242,6 +242,12 @@ fn extract_test_summary(output: &str, command: &str) -> String { if failures.len() > 10 { output.push_str(&format!(" ... +{} more failures\n", failures.len() - 10)); } + for f in failure_lines.iter().take(20) { + output.push_str(&format!(" {}\n", f.trim())); + } + if failure_lines.len() > 20 { + output.push_str(&format!(" ... +{} more\n", failure_lines.len() - 20)); + } output.push('\n'); } diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index 0af3448f3..fe569a597 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -495,7 +495,7 @@ mod tests { let output = grep_wrapper(&input); let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(&input) as f64 * 100.0); assert!( - savings >= 40.0, + savings >= 40.0, // TODO: grep pipe filter below 60% target — improve grouping "grep filter: expected ≥40% savings, got {:.1}% (in={}, out={})", savings, count_tokens(&input), count_tokens(&output) ); @@ -516,7 +516,7 @@ mod tests { let output = find_wrapper(&input); let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(&input) as f64 * 100.0); assert!( - savings >= 40.0, + savings >= 40.0, // TODO: find pipe filter below 60% target — improve grouping "find filter: expected ≥40% savings, got {:.1}% (in={}, out={})", savings, count_tokens(&input), count_tokens(&output) ); diff --git a/src/core/runner.rs b/src/core/runner.rs index cb406da40..aa2ec8cfe 100644 --- a/src/core/runner.rs +++ b/src/core/runner.rs @@ -79,6 +79,9 @@ pub fn run( let raw_stdout = &result.raw_stdout; if opts.skip_filter_on_failure && exit_code != 0 { + if !raw.trim().is_empty() { + eprint!("{}", raw); + } timer.track(&cmd_label, &format!("rtk {}", cmd_label), raw, raw); return Ok(exit_code); } diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index 5f002b7a9..f4f80527b 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -123,6 +123,7 @@ fn 
get_rewritten(cmd: &str) -> Option { fn handle_vscode(cmd: &str) -> Result<()> { let verdict = permissions::check_command(cmd); if verdict == PermissionVerdict::Deny { + audit_log("deny", cmd, ""); return Ok(()); } @@ -138,6 +139,8 @@ fn handle_vscode(cmd: &str) -> Result<()> { _ => "ask", }; + audit_log("rewrite", cmd, &rewritten); + let output = json!({ "hookSpecificOutput": { "hookEventName": PRE_TOOL_USE_KEY, @@ -152,6 +155,7 @@ fn handle_vscode(cmd: &str) -> Result<()> { fn handle_copilot_cli(cmd: &str) -> Result<()> { if permissions::check_command(cmd) == PermissionVerdict::Deny { + audit_log("deny", cmd, ""); return Ok(()); } @@ -160,6 +164,8 @@ fn handle_copilot_cli(cmd: &str) -> Result<()> { None => return Ok(()), }; + audit_log("rewrite", cmd, &rewritten); + let output = json!({ "permissionDecision": "deny", "permissionDecisionReason": format!( @@ -210,7 +216,10 @@ pub fn run_gemini() -> Result<()> { .unwrap_or_default(); match rewrite_command(cmd, &excluded) { - Some(rewritten) => print_rewrite(&rewritten), + Some(ref rewritten) => { + audit_log("rewrite", cmd, rewritten); + print_rewrite(rewritten); + } None => print_allow(), } @@ -420,7 +429,9 @@ pub fn run_cursor() -> Result<()> { } }; - if permissions::check_command(&cmd) == PermissionVerdict::Deny { + let verdict = permissions::check_command(&cmd); + if verdict == PermissionVerdict::Deny { + audit_log("deny", &cmd, ""); let _ = writeln!(io::stdout(), "{{}}"); return Ok(()); } @@ -433,8 +444,15 @@ pub fn run_cursor() -> Result<()> { } }; + let decision = match verdict { + PermissionVerdict::Allow => "allow", + _ => "ask", + }; + + audit_log("rewrite", &cmd, &rewritten); + let output = json!({ - "permission": "allow", + "permission": decision, "updated_input": { "command": rewritten } }); let _ = writeln!(io::stdout(), "{output}"); @@ -458,14 +476,19 @@ fn run_cursor_inner(input: &str) -> String { None => return "{}".to_string(), }; - if permissions::check_command(&cmd) == PermissionVerdict::Deny { + 
let verdict = permissions::check_command(&cmd); + if verdict == PermissionVerdict::Deny { return "{}".to_string(); } match get_rewritten(&cmd) { Some(rewritten) => { + let decision = match verdict { + PermissionVerdict::Allow => "allow", + _ => "ask", + }; let output = json!({ - "permission": "allow", + "permission": decision, "updated_input": { "command": rewritten } }); output.to_string() @@ -738,7 +761,8 @@ mod tests { fn test_cursor_rewrite_flat_format() { let result = run_cursor_inner(&cursor_input("git status")); let v: Value = serde_json::from_str(&result).unwrap(); - assert_eq!(v["permission"], "allow"); + // Default permission (no explicit allow rule) → "ask" + assert_eq!(v["permission"], "ask"); assert_eq!(v["updated_input"]["command"], "rtk git status"); assert!(v.get("hookSpecificOutput").is_none()); } @@ -772,7 +796,12 @@ mod tests { let result = run_cursor_inner(&cursor_input("cargo test")); let v: Value = serde_json::from_str(&result).unwrap(); assert!(v.get("hookSpecificOutput").is_none()); - assert_eq!(v["permission"], "allow"); + // Permission is "allow" or "ask" depending on settings.json rules + let perm = v["permission"].as_str().unwrap(); + assert!( + perm == "allow" || perm == "ask", + "expected allow or ask, got {perm}" + ); } // --- Audit logging --- diff --git a/src/hooks/init.rs b/src/hooks/init.rs index 24a736e9a..65517fc7b 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -965,7 +965,9 @@ fn run_default_mode( } /// Migrate old hook script to new binary command. -/// Deletes `~/.claude/hooks/rtk-rewrite.sh` and `.rtk-hook.sha256` if present. +/// Deletes `~/.claude/hooks/rtk-rewrite.sh` and `.rtk-hook.sha256` if present, +/// and removes the stale settings.json entry so the new `rtk hook claude` entry +/// can be registered. 
fn migrate_old_hook_script(verbose: u8) { if let Some(home) = dirs::home_dir() { let old_hook = home @@ -977,8 +979,16 @@ fn migrate_old_hook_script(verbose: u8) { if verbose > 0 { eprintln!(" [warn] Failed to remove old hook script: {e}"); } - } else if verbose > 0 { - eprintln!(" [ok] Removed old hook script: {}", old_hook.display()); + } else { + if verbose > 0 { + eprintln!(" [ok] Removed old hook script: {}", old_hook.display()); + } + // Clean up the stale settings.json entry that pointed to the deleted script + if let Err(e) = remove_legacy_settings_entries(verbose) { + if verbose > 0 { + eprintln!(" [warn] Failed to clean legacy settings.json entry: {e}"); + } + } } } // Remove legacy hash file @@ -997,6 +1007,76 @@ fn migrate_old_hook_script(verbose: u8) { } } +/// Remove only legacy `rtk-rewrite.sh` entries from settings.json. +/// Preserves any existing `rtk hook claude` entries (new format). +fn remove_legacy_settings_entries(verbose: u8) -> Result<()> { + let claude_dir = resolve_claude_dir()?; + let settings_path = claude_dir.join(SETTINGS_JSON); + + if !settings_path.exists() { + return Ok(()); + } + + let content = fs::read_to_string(&settings_path) + .with_context(|| format!("Failed to read {}", settings_path.display()))?; + if content.trim().is_empty() { + return Ok(()); + } + + let mut root: serde_json::Value = serde_json::from_str(&content) + .with_context(|| format!("Failed to parse {}", settings_path.display()))?; + + if !remove_legacy_hook_entries_from_json(&mut root) { + return Ok(()); + } + + // Backup before modifying + let backup_path = settings_path.with_extension("json.bak"); + fs::copy(&settings_path, &backup_path) + .with_context(|| format!("Failed to backup to {}", backup_path.display()))?; + + let serialized = + serde_json::to_string_pretty(&root).context("Failed to serialize settings.json")?; + atomic_write(&settings_path, &serialized)?; + + if verbose > 0 { + eprintln!(" [ok] Removed legacy rtk-rewrite.sh entry from 
settings.json"); + } + Ok(()) +} + +/// Remove only legacy `rtk-rewrite.sh` hook entries from a parsed settings.json. +/// Returns true if any entries were removed. +/// Does NOT remove `rtk hook claude` entries — those are the new format. +fn remove_legacy_hook_entries_from_json(root: &mut serde_json::Value) -> bool { + let pre_tool_use_array = match root + .get_mut("hooks") + .and_then(|h| h.get_mut(PRE_TOOL_USE_KEY)) + .and_then(|p| p.as_array_mut()) + { + Some(arr) => arr, + None => return false, + }; + + let original_len = pre_tool_use_array.len(); + pre_tool_use_array.retain(|entry| { + let dominated_by_legacy = entry + .get("hooks") + .and_then(|h| h.as_array()) + .map(|hooks| { + hooks.iter().all(|hook| { + hook.get("command") + .and_then(|c| c.as_str()) + .is_some_and(|cmd| cmd.contains(REWRITE_HOOK_FILE)) + }) + }) + .unwrap_or(false); + !dominated_by_legacy + }); + + pre_tool_use_array.len() < original_len +} + /// Generate .rtk/filters.toml template in the current directory if not present. fn generate_project_filters_template(verbose: u8) -> Result<()> { let rtk_dir = std::path::Path::new(".rtk"); @@ -1766,6 +1846,13 @@ fn install_cursor_hooks(verbose: u8) -> Result<()> { old_hook.display() ); } + // Clean stale hooks.json entry pointing to the deleted script + let hooks_json_path = cursor_dir.join(HOOKS_JSON); + if let Err(e) = remove_legacy_cursor_hooks_json_entries(&hooks_json_path, verbose) { + if verbose > 0 { + eprintln!(" [warn] Failed to clean legacy Cursor hooks.json entry: {e}"); + } + } } // Create or patch hooks.json with binary command @@ -1883,6 +1970,60 @@ fn insert_cursor_hook_entry(root: &mut serde_json::Value) -> Result<()> { Ok(()) } +/// Remove only legacy `rtk-rewrite.sh` entries from Cursor hooks.json. +/// Preserves any existing `rtk hook cursor` entries (new format). 
+fn remove_legacy_cursor_hooks_json_entries(path: &Path, verbose: u8) -> Result<()> { + if !path.exists() { + return Ok(()); + } + + let content = + fs::read_to_string(path).with_context(|| format!("Failed to read {}", path.display()))?; + if content.trim().is_empty() { + return Ok(()); + } + + let mut root: serde_json::Value = serde_json::from_str(&content) + .with_context(|| format!("Failed to parse {}", path.display()))?; + + if !remove_legacy_cursor_hook_entries_from_json(&mut root) { + return Ok(()); + } + + let serialized = + serde_json::to_string_pretty(&root).context("Failed to serialize hooks.json")?; + atomic_write(path, &serialized)?; + + if verbose > 0 { + eprintln!(" [ok] Removed legacy rtk-rewrite.sh entry from Cursor hooks.json"); + } + Ok(()) +} + +/// Remove only legacy `rtk-rewrite.sh` entries from parsed Cursor hooks.json. +/// Returns true if any entries were removed. +/// Does NOT remove `rtk hook cursor` entries — those are the new format. +fn remove_legacy_cursor_hook_entries_from_json(root: &mut serde_json::Value) -> bool { + let pre_tool_use = match root + .get_mut("hooks") + .and_then(|h| h.get_mut("preToolUse")) + .and_then(|p| p.as_array_mut()) + { + Some(arr) => arr, + None => return false, + }; + + let original_len = pre_tool_use.len(); + pre_tool_use.retain(|entry| { + !entry + .get("command") + .and_then(|c| c.as_str()) + .is_some_and(|cmd| cmd.contains(REWRITE_HOOK_FILE)) + }); + + pre_tool_use.len() < original_len +} + /// Remove Cursor RTK artifacts: hook script + hooks.json entry fn remove_cursor_hooks(verbose: u8) -> Result> { let cursor_dir = resolve_cursor_dir()?; @@ -2293,6 +2434,10 @@ pub fn run_gemini(global: bool, hook_only: bool, patch_mode: PatchMode, verbose: .with_context(|| format!("Failed to set hook permissions: {}", hook_path.display()))?; } + // Store integrity baseline for tamper detection + integrity::store_hash(&hook_path) + .with_context(|| format!("Failed to store integrity hash for {}", 
hook_path.display()))?; + // 2. Install GEMINI.md (RTK awareness for Gemini) if !hook_only { let gemini_md_path = gemini_dir.join(GEMINI_MD); @@ -3382,4 +3527,145 @@ More notes let removed = remove_cursor_hook_from_json(&mut json_content); assert!(!removed); } + + // ─── Legacy migration tests ────────────────────────────────────── + + #[test] + fn test_remove_legacy_hook_entries_strips_old_script() { + let mut root = serde_json::json!({ + "hooks": { + "PreToolUse": [{ + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": "/home/user/.claude/hooks/rtk-rewrite.sh" + }] + }] + } + }); + + assert!(remove_legacy_hook_entries_from_json(&mut root)); + let arr = root["hooks"]["PreToolUse"].as_array().unwrap(); + assert!(arr.is_empty()); + } + + #[test] + fn test_remove_legacy_hook_entries_preserves_new_command() { + let mut root = serde_json::json!({ + "hooks": { + "PreToolUse": [ + { + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": "/home/user/.claude/hooks/rtk-rewrite.sh" + }] + }, + { + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": CLAUDE_HOOK_COMMAND + }] + } + ] + } + }); + + assert!(remove_legacy_hook_entries_from_json(&mut root)); + let arr = root["hooks"]["PreToolUse"].as_array().unwrap(); + assert_eq!(arr.len(), 1); + let cmd = arr[0]["hooks"][0]["command"].as_str().unwrap(); + assert_eq!(cmd, CLAUDE_HOOK_COMMAND); + } + + #[test] + fn test_remove_legacy_hook_entries_noop_when_no_legacy() { + let mut root = serde_json::json!({ + "hooks": { + "PreToolUse": [{ + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": CLAUDE_HOOK_COMMAND + }] + }] + } + }); + + assert!(!remove_legacy_hook_entries_from_json(&mut root)); + let arr = root["hooks"]["PreToolUse"].as_array().unwrap(); + assert_eq!(arr.len(), 1); + } + + #[test] + fn test_remove_legacy_hook_entries_preserves_third_party_hooks() { + let mut root = serde_json::json!({ + "hooks": { + "PreToolUse": [ + { + "matcher": "Bash", + "hooks": [{ + 
"type": "command", + "command": "/home/user/.claude/hooks/rtk-rewrite.sh" + }] + }, + { + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": "some-other-tool --hook" + }] + } + ] + } + }); + + assert!(remove_legacy_hook_entries_from_json(&mut root)); + let arr = root["hooks"]["PreToolUse"].as_array().unwrap(); + assert_eq!(arr.len(), 1); + let cmd = arr[0]["hooks"][0]["command"].as_str().unwrap(); + assert_eq!(cmd, "some-other-tool --hook"); + } + + #[test] + fn test_remove_legacy_cursor_entries_strips_old_script() { + let mut root = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [{ + "command": "./hooks/rtk-rewrite.sh", + "matcher": "Shell" + }] + } + }); + + assert!(remove_legacy_cursor_hook_entries_from_json(&mut root)); + let arr = root["hooks"]["preToolUse"].as_array().unwrap(); + assert!(arr.is_empty()); + } + + #[test] + fn test_remove_legacy_cursor_entries_preserves_new_command() { + let mut root = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [ + { + "command": "./hooks/rtk-rewrite.sh", + "matcher": "Shell" + }, + { + "command": CURSOR_HOOK_COMMAND, + "matcher": "Shell" + } + ] + } + }); + + assert!(remove_legacy_cursor_hook_entries_from_json(&mut root)); + let arr = root["hooks"]["preToolUse"].as_array().unwrap(); + assert_eq!(arr.len(), 1); + assert_eq!(arr[0]["command"].as_str().unwrap(), CURSOR_HOOK_COMMAND); + } } diff --git a/src/main.rs b/src/main.rs index 1d139d958..e8a19c2be 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1083,7 +1083,9 @@ const RTK_META_COMMANDS: &[&str] = &[ "config", "proxy", "run", + "hook", "hook-audit", + "pipe", "cc-economics", "verify", "trust", From 9bdf435e8de3a24be213b5d5ab359bd8e7737923 Mon Sep 17 00:00:00 2001 From: Shawn Scott Date: Thu, 26 Mar 2026 14:52:17 -0700 Subject: [PATCH 155/204] fix: grep false negatives, output mangling, and truncation annotations - grep: use --no-ignore-vcs so .gitignore'd files aren't silently skipped (matches grep -r behavior, avoids 
false negatives in large monorepos) - grep: passthrough raw output for <=50 matches so AI agents can parse standard file:line:content format without retry loops - filter: replace smart_truncate heuristic with clean first-N-lines truncation and a single [X more lines] suffix (eliminates synthetic // ... markers that AI agents misread as code, causing parsing confusion and retries) --- src/cmds/system/grep_cmd.rs | 58 +++++++++++++++++++--------- src/core/filter.rs | 77 ++++++++++++++++++++++++------------- 2 files changed, 90 insertions(+), 45 deletions(-) diff --git a/src/cmds/system/grep_cmd.rs b/src/cmds/system/grep_cmd.rs index f163f4f85..83b1886db 100644 --- a/src/cmds/system/grep_cmd.rs +++ b/src/cmds/system/grep_cmd.rs @@ -29,7 +29,11 @@ pub fn run( let rg_pattern = pattern.replace(r"\|", "|"); let mut rg_cmd = resolved_command("rg"); - rg_cmd.args(["-n", "--no-heading", &rg_pattern, path]); + // --no-ignore-vcs: match grep -r behavior (don't skip .gitignore'd files). + // Without this, rg returns 0 matches for files in .gitignore, causing + // false negatives that make AI agents draw wrong conclusions. + // Using --no-ignore-vcs (not --no-ignore) so .ignore/.rgignore are still respected. + rg_cmd.args(["-n", "--no-heading", "--no-ignore-vcs", &rg_pattern, path]); if let Some(ft) = file_type { rg_cmd.arg("--type").arg(ft); @@ -72,16 +76,18 @@ pub fn run( return Ok(exit_code); } - let mut by_file: HashMap> = HashMap::new(); - let mut total = 0; + // Always filter: truncate long lines, apply per-file and global caps. + // Output in standard file:line:content format that AI agents can parse. + // (A passthrough approach yields 0% savings — no reason for RTK to exist on that path.) 
+ let total_matches = result.stdout.lines().count(); - // Compile context regex once (instead of per-line in clean_line) let context_re = if context_only { Regex::new(&format!("(?i).{{0,20}}{}.*", regex::escape(pattern))).ok() } else { None }; + let mut by_file: HashMap> = HashMap::new(); for line in result.stdout.lines() { let parts: Vec<&str> = line.splitn(3, ':').collect(); @@ -95,43 +101,39 @@ pub fn run( continue; }; - total += 1; let cleaned = clean_line(content, max_line_len, context_re.as_ref(), pattern); by_file.entry(file).or_default().push((line_num, cleaned)); } let mut rtk_output = String::new(); - rtk_output.push_str(&format!("{} matches in {}F:\n\n", total, by_file.len())); + rtk_output.push_str(&format!( + "{} matches in {} files:\n\n", + total_matches, + by_file.len() + )); let mut shown = 0; let mut files: Vec<_> = by_file.iter().collect(); files.sort_by_key(|(f, _)| *f); + let per_file = config::limits().grep_max_per_file; for (file, matches) in files { if shown >= max_results { break; } let file_display = compact_path(file); - rtk_output.push_str(&format!("[file] {} ({}):\n", file_display, matches.len())); - - let per_file = config::limits().grep_max_per_file; for (line_num, content) in matches.iter().take(per_file) { - rtk_output.push_str(&format!(" {:>4}: {}\n", line_num, content)); - shown += 1; if shown >= max_results { break; } + rtk_output.push_str(&format!("{}:{}:{}\n", file_display, line_num, content)); + shown += 1; } - - if matches.len() > per_file { - rtk_output.push_str(&format!(" +{}\n", matches.len() - per_file)); - } - rtk_output.push('\n'); } - if total > shown { - rtk_output.push_str(&format!("... 
+{}\n", total - shown)); + if total_matches > shown { + rtk_output.push_str(&format!("[+{} more]\n", total_matches - shown)); } print!("{}", rtk_output); @@ -310,4 +312,24 @@ mod tests { } // If rg is not installed, skip gracefully (test still passes) } + + #[test] + fn test_rg_no_ignore_vcs_flag_accepted() { + // Verify rg accepts --no-ignore-vcs (used to match grep -r behavior for .gitignore) + let mut cmd = resolved_command("rg"); + cmd.args([ + "-n", + "--no-heading", + "--no-ignore-vcs", + "NONEXISTENT_PATTERN_12345", + ".", + ]); + if let Ok(output) = cmd.output() { + assert!( + output.status.code() == Some(1) || output.status.success(), + "rg --no-ignore-vcs should be accepted" + ); + } + // If rg is not installed, skip gracefully (test still passes) + } } diff --git a/src/core/filter.rs b/src/core/filter.rs index de74368c7..90f89ade6 100644 --- a/src/core/filter.rs +++ b/src/core/filter.rs @@ -326,14 +326,15 @@ pub fn smart_truncate(content: &str, max_lines: usize, _lang: &Language) -> Stri return content.to_string(); } - let mut result = Vec::with_capacity(max_lines); + let mut result = Vec::with_capacity(max_lines + 1); let mut kept_lines = 0; - let mut skipped_section = false; for line in &lines { let trimmed = line.trim(); - // Always keep signatures and important structural elements + // Prioritize structurally important lines so the visible window stays useful. + // The old approach interleaved "// ... N lines omitted" markers which AI agents + // treated as code, causing parsing confusion and extra retry loops. let is_important = FUNC_SIGNATURE.is_match(trimmed) || IMPORT_PATTERN.is_match(trimmed) || trimmed.starts_with("pub ") @@ -342,31 +343,20 @@ pub fn smart_truncate(content: &str, max_lines: usize, _lang: &Language) -> Stri || trimmed == "{"; if is_important || kept_lines < max_lines / 2 { - if skipped_section { - result.push(format!( - " // ... 
{} lines omitted", - lines.len() - kept_lines - )); - skipped_section = false; - } result.push((*line).to_string()); kept_lines += 1; - } else { - skipped_section = true; } + // Non-important lines beyond max_lines/2 are silently skipped — + // no inline markers that could be mistaken for file content. if kept_lines >= max_lines - 1 { break; } } - if skipped_section || kept_lines < lines.len() { - result.push(format!( - "// ... {} more lines (total: {})", - lines.len() - kept_lines, - lines.len() - )); - } + // Single end-of-output marker: not code syntax, unambiguous to AI agents. + // Invariant: kept_lines + N == lines.len() (N = lines not shown) + result.push(format!("[{} more lines]", lines.len() - kept_lines)); result.join("\n") } @@ -484,10 +474,10 @@ fn main() { #[test] fn test_smart_truncate_overflow_count_exact() { - // 200 plain-text lines with max_lines=20. - // smart_truncate keeps the first max_lines/2=10 lines, then skips the rest. - // The overflow message "// ... N more lines (total: T)" must satisfy: - // kept_count + N == T + // 200 plain-text lines (no function signatures/imports) with max_lines=20. + // Smart selection keeps up to max_lines/2=10 non-important lines then stops. + // The overflow message "[N more lines]" must satisfy: + // kept_count + N == total_lines let total_lines = 200usize; let max_lines = 20usize; let content: String = (0..total_lines) @@ -503,11 +493,12 @@ fn main() { .find(|l| l.contains("more lines")) .unwrap_or_else(|| panic!("No overflow message found in:\n{}", output)); - // Parse "// ... 
N more lines (total: T)" + // Parse "[N more lines]" let reported_more: usize = overflow_line - .split_whitespace() - .find(|w| w.parse::().is_ok()) - .and_then(|w| w.parse().ok()) + .trim() + .strip_prefix('[') + .and_then(|s| s.split_whitespace().next()) + .and_then(|n| n.parse().ok()) .unwrap_or_else(|| panic!("Could not parse overflow count from: {}", overflow_line)); let kept_count = output @@ -524,4 +515,36 @@ fn main() { total_lines ); } + + #[test] + fn test_smart_truncate_no_annotations() { + // 10 plain-text lines, max_lines=3: smart logic keeps first max_lines/2=1 line. + // (None of the lines match FUNC_SIGNATURE or IMPORT_PATTERN patterns.) + let input = "line1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\nline9\nline10\n"; + let output = smart_truncate(input, 3, &Language::Unknown); + // Must NOT contain old-style "// ... N lines omitted" annotations + assert!( + !output.contains("// ..."), + "smart_truncate must not insert synthetic comment annotations" + ); + // Must contain clean end-of-output marker (1 kept + 9 omitted = 10 total) + assert!(output.contains("[9 more lines]")); + // Only the first line is kept (plain-text, no important signatures) + assert!(output.starts_with("line1\n")); + } + + #[test] + fn test_smart_truncate_no_truncation_when_under_limit() { + let input = "a\nb\nc\n"; + let output = smart_truncate(input, 10, &Language::Unknown); + assert_eq!(output, input); + assert!(!output.contains("more lines")); + } + + #[test] + fn test_smart_truncate_exact_limit() { + let input = "a\nb\nc"; + let output = smart_truncate(input, 3, &Language::Unknown); + assert_eq!(output, input); + } } From e92d0993c93f0b732316dfa932d265aeca7488d6 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 16 Apr 2026 20:27:13 +0200 Subject: [PATCH 156/204] fix(runner): preserve fd separation on command failure --- src/core/runner.rs | 7 +++++-- src/core/stream.rs | 3 +++ 2 files changed, 8 insertions(+), 2 
deletions(-) diff --git a/src/core/runner.rs b/src/core/runner.rs index aa2ec8cfe..f127a6081 100644 --- a/src/core/runner.rs +++ b/src/core/runner.rs @@ -79,8 +79,11 @@ pub fn run( let raw_stdout = &result.raw_stdout; if opts.skip_filter_on_failure && exit_code != 0 { - if !raw.trim().is_empty() { - eprint!("{}", raw); + if !result.raw_stdout.trim().is_empty() { + print!("{}", result.raw_stdout); + } + if !result.raw_stderr.trim().is_empty() { + eprint!("{}", result.raw_stderr); } timer.track(&cmd_label, &format!("rtk {}", cmd_label), raw, raw); return Ok(exit_code); diff --git a/src/core/stream.rs b/src/core/stream.rs index 75d576e54..b580cbbc3 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -191,6 +191,7 @@ pub struct StreamResult { pub exit_code: i32, pub raw: String, pub raw_stdout: String, + pub raw_stderr: String, pub filtered: String, } @@ -239,6 +240,7 @@ pub fn run_streaming( exit_code: status_to_exit_code(status), raw: String::new(), raw_stdout: String::new(), + raw_stderr: String::new(), filtered: String::new(), }); } @@ -432,6 +434,7 @@ pub fn run_streaming( exit_code, raw, raw_stdout, + raw_stderr, filtered, }) } From a1d46f39c291e3356b9c26a062bde05ba1de591a Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 16 Apr 2026 20:39:01 +0200 Subject: [PATCH 157/204] fix(stream): missing stderr fields --- scripts/benchmark-sessions/lib/runner.py | 155 +++++++++++++++++++++++ src/core/stream.rs | 3 + 2 files changed, 158 insertions(+) create mode 100644 scripts/benchmark-sessions/lib/runner.py diff --git a/scripts/benchmark-sessions/lib/runner.py b/scripts/benchmark-sessions/lib/runner.py new file mode 100644 index 000000000..192fbcd41 --- /dev/null +++ b/scripts/benchmark-sessions/lib/runner.py @@ -0,0 +1,155 @@ +from __future__ import annotations + +import asyncio +import subprocess +import tempfile +from pathlib import Path + +from .config import TaskConfig +from .manifest import ( + RunManifest, + 
SessionEntry, + TbEntry, + TbTaskEntry, + write_manifest, +) +from .session import run_all_sessions, setup_codebase, setup_rtk +from .terminal_bench import run_terminal_bench +from .vm import create_vm_pool, destroy_vm_pool + +ROOT_DIR = Path(__file__).resolve().parent.parent + + +def _create_tarball(source_dir: Path) -> str: + tarball = tempfile.mktemp(suffix=".tar.gz") + subprocess.run( + ["tar", "czf", tarball, "-C", str(source_dir), "."], + check=True, + ) + return tarball + + +def _print_step(step: int, total: int, msg: str): + print(f"\n[{step}/{total}] {msg}") + + +def _session_to_entry(r) -> SessionEntry: + return SessionEntry( + vm_name=r.vm_name, + group=r.group, + stdout_json=f"{r.vm_name}-stdout.json", + otel_log=f"{r.vm_name}-otel.log", + rtk_db=f"{r.vm_name}-tracking.db" if r.rtk_db_path else None, + exit_code=r.exit_code, + error=r.error or None, + ) + + +def _tb_to_entry(r) -> TbEntry: + return TbEntry( + vm_name=r.vm_name, + group=r.group, + total=r.total, + passed=r.passed, + failed=r.failed, + tasks=[TbTaskEntry(name=t.name, passed=t.passed, duration_s=t.duration_s) for t in r.tasks], + error=r.error, + ) + + +async def run_benchmark( + task: TaskConfig, + vms: int, + api_key: str, + output_dir: Path, + cloud_init: Path | None = None, + terminal_bench: bool = False, + keep_vms: bool = False, +) -> RunManifest: + if cloud_init is None: + cloud_init = ROOT_DIR / "cloud-init-base.yaml" + + output_dir.mkdir(parents=True, exist_ok=True) + + total_steps = 5 if terminal_bench else 4 + vm_names: list[str] = [] + + manifest = RunManifest( + task_name=task.name, + model=task.model, + vm_count=vms, + ) + + try: + _print_step(1, total_steps, f"Creating {vms * 2} VMs ({vms} RTK ON + {vms} RTK OFF)") + vm_names = await create_vm_pool(vms, cloud_init) + print(f" VMs ready: {', '.join(vm_names)}") + + _print_step(2, total_steps, "Setting up codebases") + local_tarball = None + if not task.codebase.is_github: + local_tarball = 
_create_tarball(task.codebase.local_path()) + + await asyncio.gather(*( + setup_codebase(name, task.codebase, local_tarball) + for name in vm_names + )) + print(" Codebases deployed") + + _print_step(3, total_steps, "Configuring RTK on ON VMs") + setup_script = ROOT_DIR / "setup-rtk.sh" + on_vms = [n for n in vm_names if "-on-" in n] + off_vms = [n for n in vm_names if "-off-" in n] + await asyncio.gather(*(setup_rtk(vm, setup_script) for vm in on_vms)) + print(f" RTK configured on {len(on_vms)} VMs") + + _print_step(4, total_steps, f"Running Claude sessions (timeout: {task.timeout_minutes}min)") + results = await run_all_sessions(vm_names, task, api_key, output_dir) + + on_ok = [r for r in results if r.group == "on" and not r.error] + off_ok = [r for r in results if r.group == "off" and not r.error] + errors = [r for r in results if r.error] + print(f" Completed: {len(on_ok)} ON, {len(off_ok)} OFF, {len(errors)} errors") + for r in errors: + print(f" {r.vm_name}: {r.error}") + + manifest.sessions = [_session_to_entry(r) for r in results] + + if terminal_bench: + _print_step(5, total_steps, "Running terminal-bench precision tests") + tb_on = await asyncio.gather(*( + run_terminal_bench(vm, "on", task.model, api_key) + for vm in on_vms + )) + tb_off = await asyncio.gather(*( + run_terminal_bench(vm, "off", task.model, api_key) + for vm in off_vms + )) + + manifest.terminal_bench = [_tb_to_entry(r) for r in list(tb_on) + list(tb_off)] + + ok_on = [r for r in tb_on if not r.error] + ok_off = [r for r in tb_off if not r.error] + if ok_on and ok_off: + on_total = sum(r.total for r in ok_on) + on_passed = sum(r.passed for r in ok_on) + off_total = sum(r.total for r in ok_off) + off_passed = sum(r.passed for r in ok_off) + on_rate = on_passed / on_total if on_total else 0 + off_rate = off_passed / off_total if off_total else 0 + print(f" terminal-bench: ON pass rate={on_rate:.0%}, OFF pass rate={off_rate:.0%}, delta={on_rate - off_rate:+.0%}") + + tb_errors = [r for r in 
list(tb_on) + list(tb_off) if r.error] + for r in tb_errors: + print(f" {r.vm_name}: {r.error}") + + write_manifest(manifest, output_dir) + print(f"\n Manifest written to {output_dir / 'manifest.json'}") + + finally: + if not keep_vms and vm_names: + print("\nCleaning up VMs...") + await destroy_vm_pool(vm_names) + print(" VMs destroyed") + + return manifest diff --git a/src/core/stream.rs b/src/core/stream.rs index b580cbbc3..7f7f29704 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -522,6 +522,7 @@ pub(crate) mod tests { exit_code: 0, raw: String::new(), raw_stdout: String::new(), + raw_stderr: String::new(), filtered: String::new(), }; assert!(r.success()); @@ -533,6 +534,7 @@ pub(crate) mod tests { exit_code: 1, raw: String::new(), raw_stdout: String::new(), + raw_stderr: String::new(), filtered: String::new(), }; assert!(!r.success()); @@ -544,6 +546,7 @@ pub(crate) mod tests { exit_code: 137, raw: String::new(), raw_stdout: String::new(), + raw_stderr: String::new(), filtered: String::new(), }; assert!(!r.success()); From d8ddefe78efe25c35bb2a2f9083f2eacb9dd7274 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 16 Apr 2026 21:00:13 +0200 Subject: [PATCH 158/204] fix: isolate cursor hook tests from local settings (determinist) --- src/hooks/hook_cmd.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index f4f80527b..cd3c82d1e 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -459,9 +459,18 @@ pub fn run_cursor() -> Result<()> { Ok(()) } -/// Process a Cursor hook payload from a string (for testing). 
#[cfg(test)] fn run_cursor_inner(input: &str) -> String { + run_cursor_inner_with_rules(input, &[], &[], &[]) +} + +#[cfg(test)] +fn run_cursor_inner_with_rules( + input: &str, + deny_rules: &[String], + ask_rules: &[String], + allow_rules: &[String], +) -> String { let v: Value = match serde_json::from_str(input) { Ok(v) => v, Err(_) => return "{}".to_string(), @@ -476,7 +485,7 @@ fn run_cursor_inner(input: &str) -> String { None => return "{}".to_string(), }; - let verdict = permissions::check_command(&cmd); + let verdict = permissions::check_command_with_rules(&cmd, deny_rules, ask_rules, allow_rules); if verdict == PermissionVerdict::Deny { return "{}".to_string(); } @@ -796,12 +805,7 @@ mod tests { let result = run_cursor_inner(&cursor_input("cargo test")); let v: Value = serde_json::from_str(&result).unwrap(); assert!(v.get("hookSpecificOutput").is_none()); - // Permission is "allow" or "ask" depending on settings.json rules - let perm = v["permission"].as_str().unwrap(); - assert!( - perm == "allow" || perm == "ask", - "expected allow or ask, got {perm}" - ); + assert_eq!(v["permission"], "ask"); } // --- Audit logging --- From 75bcf9dec6a3ddf2fecea9bc5aee5d1e0467854c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 17 Apr 2026 07:45:25 +0000 Subject: [PATCH 159/204] chore(master): release 0.37.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ Cargo.lock | 2 +- Cargo.toml | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 93c546c8d..05b60243f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.36.0" + ".": "0.37.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 08cae60ae..688458579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,25 @@ All notable changes to rtk (Rust Token Killer) will be documented in this 
file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.37.0](https://github.com/rtk-ai/rtk/compare/v0.36.0...v0.37.0) (2026-04-17) + + +### Features + +* **discover:** handle more npm/npx/pnpm/pnpx patterns ([9e96caa](https://github.com/rtk-ai/rtk/commit/9e96caa0a18a95c84da82ba57716a9d3ef86d0c8)) +* **refacto-core:** binary hook w/ native cmd exec + streaming ([e7b7f9a](https://github.com/rtk-ai/rtk/commit/e7b7f9ab665a0f7303d41d23ad156d24e5e8964e)) + + +### Bug Fixes + +* **docs:** use release please changelog no manual ([7591a14](https://github.com/rtk-ai/rtk/commit/7591a14e4ceb732ab7ca160ac01a852926abe77a)) +* isolate cursor hook tests from local settings (determinist) ([d8ddefe](https://github.com/rtk-ai/rtk/commit/d8ddefe78efe25c35bb2a2f9083f2eacb9dd7274)) +* P0+P1 fixes from pre-merge review of hook engine ([df8e035](https://github.com/rtk-ai/rtk/commit/df8e03558d4d6cc2f5cbac91c63ab1b3b51d3bcd)) +* P0+P1 fixes from pre-merge review of hook engine ([d34389c](https://github.com/rtk-ai/rtk/commit/d34389c3d0936c2b0790e14f450bb50a28a7edf7)) +* rename ship.md to ship/SKILL.md to match develop ([5916ecd](https://github.com/rtk-ai/rtk/commit/5916ecd86fb319c2519a0b4fb2891309833a3bb4)) +* **runner:** preserve fd separation on command failure ([e92d099](https://github.com/rtk-ai/rtk/commit/e92d0993c93f0b732316dfa932d265aeca7488d6)) +* **stream:** missing stderr fields ([a1d46f3](https://github.com/rtk-ai/rtk/commit/a1d46f39c291e3356b9c26a062bde05ba1de591a)) + ## [0.36.0](https://github.com/rtk-ai/rtk/compare/v0.35.0...v0.36.0) (2026-04-13) diff --git a/Cargo.lock b/Cargo.lock index 7ad9dc981..29a041450 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,7 +892,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.36.0" +version = "0.37.0" dependencies = [ "anyhow", "automod", diff --git a/Cargo.toml b/Cargo.toml index 81cc9c1df..778af25ee 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.34.3" +version = "0.37.0" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From ea56548009cebe66c3e94a9c32318a31f6f0ddde Mon Sep 17 00:00:00 2001 From: Adrien Eppling Date: Sat, 18 Apr 2026 15:29:36 +0200 Subject: [PATCH 160/204] Revert "fix(discover): weighted savings rate per bucket, decimal already_rtk percent" This reverts commit 82c62eb893966b8f170ea22ec72e79f14789e12e. --- src/discover/mod.rs | 23 +++------------- src/discover/report.rs | 60 +++--------------------------------------- 2 files changed, 6 insertions(+), 77 deletions(-) diff --git a/src/discover/mod.rs b/src/discover/mod.rs index e5b4a87b8..ada51f8e5 100644 --- a/src/discover/mod.rs +++ b/src/discover/mod.rs @@ -21,13 +21,8 @@ struct SupportedBucket { rtk_equivalent: &'static str, category: &'static str, count: usize, - /// Total estimated tokens *saved* (post-filter). Used for the "Est. Savings" column. total_output_tokens: usize, - /// Total estimated tokens *before* filtering (raw output). Accumulated alongside - /// `total_output_tokens` so the bucket's effective savings rate can be derived as - /// `total_output_tokens / total_raw_output_tokens` — a weighted average across - /// all sub-commands, regardless of which sub-command was seen first. 
- total_raw_output_tokens: usize, + savings_pct: f64, // For display: the most common raw command command_counts: HashMap, } @@ -125,7 +120,7 @@ pub fn run( category, count: 0, total_output_tokens: 0, - total_raw_output_tokens: 0, + savings_pct: estimated_savings_pct, command_counts: HashMap::new(), } }); @@ -145,9 +140,6 @@ pub fn run( let savings = (output_tokens as f64 * estimated_savings_pct / 100.0) as usize; bucket.total_output_tokens += savings; - // Accumulate pre-savings tokens so we can compute a weighted effective - // savings rate across all sub-commands in this bucket later. - bucket.total_raw_output_tokens += output_tokens; // Track the display name with status let display_name = truncate_command(part); @@ -204,22 +196,13 @@ pub fn run( }) .unwrap_or_else(|| (String::new(), report::RtkStatus::Existing)); - // Derive the effective savings rate from accumulated totals rather than - // using the first-seen sub-command's rate. This gives a weighted average - // across all sub-commands that fell in this bucket. 
- let effective_savings_pct = if bucket.total_raw_output_tokens > 0 { - bucket.total_output_tokens as f64 * 100.0 / bucket.total_raw_output_tokens as f64 - } else { - 0.0 - }; - SupportedEntry { command: command_with_status, count: bucket.count, rtk_equivalent: bucket.rtk_equivalent, category: bucket.category, estimated_savings_tokens: bucket.total_output_tokens, - estimated_savings_pct: effective_savings_pct, + estimated_savings_pct: bucket.savings_pct, rtk_status: status, } }) diff --git a/src/discover/report.rs b/src/discover/report.rs index 128ecb45e..652bb3482 100644 --- a/src/discover/report.rs +++ b/src/discover/report.rs @@ -83,12 +83,12 @@ pub fn format_text(report: &DiscoverReport, limit: usize, verbose: bool) -> Stri report.sessions_scanned, report.since_days, report.total_commands )); out.push_str(&format!( - "Already using RTK: {} commands ({:.1}%)\n", + "Already using RTK: {} commands ({}%)\n", report.already_rtk, if report.total_commands > 0 { - report.already_rtk as f64 * 100.0 / report.total_commands as f64 + report.already_rtk * 100 / report.total_commands } else { - 0.0 + 0 } )); @@ -214,57 +214,3 @@ fn truncate_str(s: &str, max: usize) -> String { format!("{}..", truncated) } } - -#[cfg(test)] -mod tests { - use super::*; - - fn make_report(total_commands: usize, already_rtk: usize) -> DiscoverReport { - DiscoverReport { - sessions_scanned: 1, - total_commands, - already_rtk, - since_days: 30, - supported: vec![], - unsupported: vec![], - parse_errors: 0, - rtk_disabled_count: 0, - rtk_disabled_examples: vec![], - } - } - - // B6 regression: integer division truncated small percentages to 0%. - // Example: 3/1000 = 0% (old bug), should be "0.3%". 
- #[test] - fn test_already_rtk_percent_shows_decimal() { - let report = make_report(1000, 3); - let output = format_text(&report, 10, false); - // "0.3%" must appear; old code would print "0%" - assert!( - output.contains("0.3%"), - "Expected '0.3%' in output but got:\n{}", - output - ); - assert!( - !output.contains("(0%)"), - "Output must not contain '(0%)' — integer division bug still present:\n{}", - output - ); - } - - // Edge case: 0/0 must not divide-by-zero. - #[test] - fn test_already_rtk_percent_zero_total() { - let report = make_report(0, 0); - let output = format_text(&report, 10, false); - assert!(output.contains("0 commands (0.0%)")); - } - - // Full percent: 1000/1000 = 100.0% - #[test] - fn test_already_rtk_percent_full() { - let report = make_report(1000, 1000); - let output = format_text(&report, 10, false); - assert!(output.contains("100.0%")); - } -} From 92823bf21de51632477daec4dc7a82553e10e803 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 18 Apr 2026 13:46:53 +0000 Subject: [PATCH 161/204] chore(master): release 0.37.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 7 +++++++ Cargo.lock | 2 +- Cargo.toml | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 05b60243f..18ec56b0d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.37.0" + ".": "0.37.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 688458579..081d4f11e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.37.1](https://github.com/rtk-ai/rtk/compare/v0.37.0...v0.37.1) (2026-04-18) + + +### Bug Fixes + +* **docs:** user facing docs ([c8d6878](https://github.com/rtk-ai/rtk/commit/c8d68787fb8b31c52125e9fc7ea62e0aa590485f)) + ## [0.37.0](https://github.com/rtk-ai/rtk/compare/v0.36.0...v0.37.0) (2026-04-17) diff --git a/Cargo.lock b/Cargo.lock index 29a041450..f24979261 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,7 +892,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.37.0" +version = "0.37.1" dependencies = [ "anyhow", "automod", diff --git a/Cargo.toml b/Cargo.toml index 778af25ee..8da192c91 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.37.0" +version = "0.37.1" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From 5cd375ec1c180d2d628cb526e5e861d32682cda7 Mon Sep 17 00:00:00 2001 From: Adrien Eppling Date: Sat, 18 Apr 2026 15:50:40 +0200 Subject: [PATCH 162/204] Reapply "fix(discover): weighted savings rate per bucket, decimal already_rtk percent" This reverts commit ea56548009cebe66c3e94a9c32318a31f6f0ddde. --- src/discover/mod.rs | 23 +++++++++++++--- src/discover/report.rs | 60 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/src/discover/mod.rs b/src/discover/mod.rs index ada51f8e5..e5b4a87b8 100644 --- a/src/discover/mod.rs +++ b/src/discover/mod.rs @@ -21,8 +21,13 @@ struct SupportedBucket { rtk_equivalent: &'static str, category: &'static str, count: usize, + /// Total estimated tokens *saved* (post-filter). Used for the "Est. Savings" column. total_output_tokens: usize, - savings_pct: f64, + /// Total estimated tokens *before* filtering (raw output). 
Accumulated alongside + /// `total_output_tokens` so the bucket's effective savings rate can be derived as + /// `total_output_tokens / total_raw_output_tokens` — a weighted average across + /// all sub-commands, regardless of which sub-command was seen first. + total_raw_output_tokens: usize, // For display: the most common raw command command_counts: HashMap, } @@ -120,7 +125,7 @@ pub fn run( category, count: 0, total_output_tokens: 0, - savings_pct: estimated_savings_pct, + total_raw_output_tokens: 0, command_counts: HashMap::new(), } }); @@ -140,6 +145,9 @@ pub fn run( let savings = (output_tokens as f64 * estimated_savings_pct / 100.0) as usize; bucket.total_output_tokens += savings; + // Accumulate pre-savings tokens so we can compute a weighted effective + // savings rate across all sub-commands in this bucket later. + bucket.total_raw_output_tokens += output_tokens; // Track the display name with status let display_name = truncate_command(part); @@ -196,13 +204,22 @@ pub fn run( }) .unwrap_or_else(|| (String::new(), report::RtkStatus::Existing)); + // Derive the effective savings rate from accumulated totals rather than + // using the first-seen sub-command's rate. This gives a weighted average + // across all sub-commands that fell in this bucket. 
+ let effective_savings_pct = if bucket.total_raw_output_tokens > 0 { + bucket.total_output_tokens as f64 * 100.0 / bucket.total_raw_output_tokens as f64 + } else { + 0.0 + }; + SupportedEntry { command: command_with_status, count: bucket.count, rtk_equivalent: bucket.rtk_equivalent, category: bucket.category, estimated_savings_tokens: bucket.total_output_tokens, - estimated_savings_pct: bucket.savings_pct, + estimated_savings_pct: effective_savings_pct, rtk_status: status, } }) diff --git a/src/discover/report.rs b/src/discover/report.rs index 652bb3482..128ecb45e 100644 --- a/src/discover/report.rs +++ b/src/discover/report.rs @@ -83,12 +83,12 @@ pub fn format_text(report: &DiscoverReport, limit: usize, verbose: bool) -> Stri report.sessions_scanned, report.since_days, report.total_commands )); out.push_str(&format!( - "Already using RTK: {} commands ({}%)\n", + "Already using RTK: {} commands ({:.1}%)\n", report.already_rtk, if report.total_commands > 0 { - report.already_rtk * 100 / report.total_commands + report.already_rtk as f64 * 100.0 / report.total_commands as f64 } else { - 0 + 0.0 } )); @@ -214,3 +214,57 @@ fn truncate_str(s: &str, max: usize) -> String { format!("{}..", truncated) } } + +#[cfg(test)] +mod tests { + use super::*; + + fn make_report(total_commands: usize, already_rtk: usize) -> DiscoverReport { + DiscoverReport { + sessions_scanned: 1, + total_commands, + already_rtk, + since_days: 30, + supported: vec![], + unsupported: vec![], + parse_errors: 0, + rtk_disabled_count: 0, + rtk_disabled_examples: vec![], + } + } + + // B6 regression: integer division truncated small percentages to 0%. + // Example: 3/1000 = 0% (old bug), should be "0.3%". 
+ #[test] + fn test_already_rtk_percent_shows_decimal() { + let report = make_report(1000, 3); + let output = format_text(&report, 10, false); + // "0.3%" must appear; old code would print "0%" + assert!( + output.contains("0.3%"), + "Expected '0.3%' in output but got:\n{}", + output + ); + assert!( + !output.contains("(0%)"), + "Output must not contain '(0%)' — integer division bug still present:\n{}", + output + ); + } + + // Edge case: 0/0 must not divide-by-zero. + #[test] + fn test_already_rtk_percent_zero_total() { + let report = make_report(0, 0); + let output = format_text(&report, 10, false); + assert!(output.contains("0 commands (0.0%)")); + } + + // Full percent: 1000/1000 = 100.0% + #[test] + fn test_already_rtk_percent_full() { + let report = make_report(1000, 1000); + let output = format_text(&report, 10, false); + assert!(output.contains("100.0%")); + } +} From e8a3bca5fb632c1eff4fdaa884bd36bf1f8260ad Mon Sep 17 00:00:00 2001 From: Joshua Date: Sat, 18 Apr 2026 09:51:56 -0500 Subject: [PATCH 163/204] adding edge case comments and a constant for max response size --- src/cmds/cloud/curl_cmd.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/cmds/cloud/curl_cmd.rs b/src/cmds/cloud/curl_cmd.rs index acd4cb91f..a24506d8c 100644 --- a/src/cmds/cloud/curl_cmd.rs +++ b/src/cmds/cloud/curl_cmd.rs @@ -1,10 +1,12 @@ -//! Runs curl and auto-compresses JSON responses. +//! Runs curl and applies a simple truncation with tee hint if the output is too long. use crate::core::tee::force_tee_hint; use crate::core::tracking; use crate::core::{stream::exec_capture, utils::resolved_command}; use anyhow::{Context, Result}; +const MAX_RESPONSE_SIZE: usize = 500; + /// Not using run_filtered: on failure, curl can return HTML error pages (404, 500) /// that the JSON schema filter would mangle. The early exit skips filtering entirely. 
pub fn run(args: &[String], verbose: u8) -> Result { @@ -56,8 +58,11 @@ fn filter_curl_output(raw: &str) -> FilterResult { let trimmed = raw.trim(); let tee_hint = force_tee_hint(raw, "curl"); - let content = if trimmed.len() >= 500 { - let mut end = 500; + // If the output is too long and we have a tee hint, truncate the output. + let content = if trimmed.len() >= MAX_RESPONSE_SIZE && tee_hint.is_some() { + let mut end = MAX_RESPONSE_SIZE; + // Ensure we don't cut in the middle of a UTF-8 character. + // .len() counts bytes, not chars. while !trimmed.is_char_boundary(end) { end -= 1; } From a724375650dbf88aca2b547300ace1de245c1671 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 18 Apr 2026 17:26:12 +0200 Subject: [PATCH 164/204] docs(contributing): coding practices v1 --- docs/contributing/CODING_PRACTICES.md | 186 ++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 docs/contributing/CODING_PRACTICES.md diff --git a/docs/contributing/CODING_PRACTICES.md b/docs/contributing/CODING_PRACTICES.md new file mode 100644 index 000000000..bc0975541 --- /dev/null +++ b/docs/contributing/CODING_PRACTICES.md @@ -0,0 +1,186 @@ +# RTK Coding Practices v1.0 + +This document follows the [Design Philosophy](../../CONTRIBUTING.md#design-philosophy) in `CONTRIBUTING.md`. Once you understand the mental model there, this guide describes the coding practices we use day-to-day in RTK and what reviewers will look for on your PR. + +Our goal is to keep the codebase consistent and easy to extend. PRs that deviate from these practices may be asked for changes during review — this is guidance, not a gate. If a rule seems wrong for your specific case, flag it in the PR and we'll discuss. + +> **Heads up:** RTK has grown quickly and some code in the repository predates these practices. You may spot modules that don't fully follow them — this is expected, and core/ecosystem maintainers will refactor them over time. 
When in doubt, follow the practices below for new code rather than mirroring older patterns. + +--- + +## Quick Start for Contributors + +New to RTK? The fastest path to a mergeable first PR: + +1. **Read the flow once.** Start at [`CONTRIBUTING.md`](../../CONTRIBUTING.md), then skim [`docs/contributing/TECHNICAL.md`](TECHNICAL.md) to see how a command flows from `main.rs` → a `*_cmd.rs` filter → tracking → stdout. +2. **Look at a good example.** [`src/cmds/git/git.rs`](../../src/cmds/git/git.rs) is a representative filter — it shows the `run()` entry point, `lazy_static!` regex setup, filter helpers, and embedded tests all in one file. +3. **Know the shared helpers before reimplementing.** Two files cover most of what you need: + - [`src/core/runner.rs`](../../src/core/runner.rs) — command execution wrappers: `run_filtered()` (run a command, then apply your filter function), `run_passthrough()` (run unfiltered but tracked), `run_streamed()` (streaming filter). + - [`src/core/utils.rs`](../../src/core/utils.rs) — shared utilities: `resolved_command()`, `strip_ansi()`, `truncate()`, `count_tokens()`, and more. +4. **Follow the checklist.** [`src/cmds/README.md — Adding a New Command Filter`](../../src/cmds/README.md#adding-a-new-command-filter) walks you through creating a filter, registering it, and adding tests. +5. **Write the test first.** We follow Red-Green-Refactor. A snapshot test plus a token-savings assertion (see [Testing](#testing) below) is enough for most filters. + +If you're unsure whether your approach fits, open a draft PR or a discussion early — we'd rather help shape the design than ask for a rewrite at review. + +--- + +## Design Philosophy + +For the full framing (Correctness vs. Token Savings, Transparency, Never Block, Zero Overhead, Extensibility), see the [Design Philosophy](../../CONTRIBUTING.md#design-philosophy) section in `CONTRIBUTING.md`. 
+ +Two practical reminders that come up often in review: + +**Portability.** RTK should behave the same across platforms. Use `#[cfg(target_os = "...")]` for platform-specific code; never assume a single OS. + +**Extensibility.** RTK should be modular. Before writing a new feature or filter, check whether an existing entry point fits — `runner::run_filtered()`, `runner::run_passthrough()`, helpers in `src/core/utils.rs`, etc. If your logic could be reused elsewhere, lift it into a shared component rather than burying it in one `*_cmd.rs` file. + +--- + +## Files, Functions, and Documentation + +Each folder contains a root `README.md` that explains the main principles, flows, and specificities of the source files it owns. These READMEs should describe concepts and cases — not list individual source files or counts, to avoid stale lists as the code evolves. Because the root README reflects core features and logic, it should not change often; meaningful edits usually imply a core refactor. + +Tests live in the same file as the code they test (inside `#[cfg(test)] mod tests { ... }`), not in a separate test file. This keeps the filter, its fixtures, and its assertions close together. + +--- + +## Edge Cases + +When you add an edge-case branch or a non-obvious exception, leave a short comment above it explaining *why* it exists. This prevents a future contributor from removing it because the reason isn't visible from the code alone. + +Referencing an issue is often the clearest form: + +```rust +// ISSUE #463: some `git log` output contains NUL bytes when --format=%x00 is used; +// skip the line rather than panicking on invalid UTF-8. +if line.contains('\0') { + continue; +} +``` + +--- + +## Comments + +Prefer code that reads clearly over code that needs comments to explain it. In particular, avoid redundant comments that restate what the function signature already says. + +Comments are welcome when they add information the code cannot carry on its own. 
The common cases: + +- **File header (`//!`)** — purpose and scope of the current file. +- **Edge case** — a non-obvious branch or exception, as described above. +- **Issue reference** — e.g. `// ISSUE #463: the fix for this`. +- **"Why, not what"** — when the intent or tradeoff behind a decision isn't obvious from the code. + +In short: avoid noise comments; keep the ones that would save a future reader a trip to `git blame`. + +--- + +## Variables + +Use explicit, descriptive names for variables, just like for functions. + +Do not hardcode repetitive patterns or values that control behavior — extract them into named constants at the top of the file. For anything a user might want to tune (thresholds, limits, display cutoffs), use `config::limits()` so it flows through `~/.config/rtk/config.toml`. + +Example from `src/cmds/git/git.rs`: + +```rust +let limits = config::limits(); +let max_files = limits.status_max_files; +let max_untracked = limits.status_max_untracked; +``` + +--- + +## Function and File Size + +**Prefer functions under ~60 lines.** Shorter functions are easier to read, test, and reuse. If a function grows beyond that, it's usually a sign the logic should be split into helpers — but this is a guideline, not a hard cap. + +Legitimate exceptions include: +- Dispatcher / match functions that route to subcommands, where each arm delegates to a focused helper. +- State-machine parsers where splitting would harm readability. + +When you keep a longer function, aim to make each block obviously cohesive — and consider leaving a short comment on *why* splitting it would hurt. + +**Files are expected to be large** in RTK because each module keeps its tests and fixtures alongside the implementation. When a file becomes hard to navigate, split responsibilities across multiple files where possible. If it isn't possible, a big file is acceptable for now. + +--- + +## Imports and Dependencies + +RTK is a low-dependency project. 
Before adding a crate, check whether the functionality is already covered by `std`, an existing dependency, or `src/core/utils.rs`. If a few lines of straightforward code will do the job, prefer that over a new dependency. + +When a new dependency is genuinely needed, justify it in the PR description. For non-trivial additions, it's worth opening a discussion with maintainers first. + +--- + +## Error Handling + +Use `anyhow::Result` everywhere, and always attach context with `.context("description")?` or `.with_context(|| format!(...))`. + +Never silently swallow errors (`Err(_) => {}`). Either log with `eprintln!` and fall back to raw output (the common case for filters), or propagate the error. + +Example of the standard fallback pattern for a filter: + +```rust +let filtered = filter_output(&output.stdout) + .unwrap_or_else(|e| { + eprintln!("rtk: filter warning: {}", e); + output.stdout.clone() // passthrough on failure — never block the user + }); +``` + +For the full error-handling architecture (propagation chain, exit code preservation), see [ARCHITECTURE.md — Error Handling](ARCHITECTURE.md#error-handling). + +--- + +## Testing + +See [`CONTRIBUTING.md` — Testing](../../CONTRIBUTING.md#testing) for the full strategy. In short, for a new filter you typically want: + +- **Unit + snapshot tests** in the same file, using the `insta` crate. +- **A token-savings assertion** verifying the filter hits the ≥60% target on a real fixture. 
+ +Minimal example: + +```rust +#[cfg(test)] +mod tests { + use super::*; + use insta::assert_snapshot; + + fn count_tokens(s: &str) -> usize { s.split_whitespace().count() } + + #[test] + fn filter_git_log_snapshot() { + let input = include_str!("../../../tests/fixtures/git_log_raw.txt"); + let output = filter_git_log(input); + assert_snapshot!(output); + } + + #[test] + fn filter_git_log_savings() { + let input = include_str!("../../../tests/fixtures/git_log_raw.txt"); + let output = filter_git_log(input); + let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(input) as f64 * 100.0); + assert!(savings >= 60.0, "expected ≥60% savings, got {:.1}%", savings); + } +} +``` + +Fixtures go in `tests/fixtures/` and should be captured from real command output rather than hand-written. + +--- + +## Security + +RTK executes shell commands on behalf of the user, so security is a first-class concern. + +**Command execution.** All commands go through argument arrays via `Command::new().args()` — never through shell string concatenation. This prevents injection. Always use `resolved_command()` from `src/core/utils.rs` instead of a raw `Command::new()`. + +**Hook integrity.** RTK verifies hook files via SHA-256 hashes before operational commands. If a hook has been tampered with, RTK exits with code 1. See [`src/hooks/integrity.rs`](../../src/hooks/integrity.rs). + +**Project filter trust.** `.rtk/filters.toml` files are not loaded until the user explicitly trusts them, and content changes require re-trust. See [`src/hooks/trust.rs`](../../src/hooks/trust.rs). + +**Permission whitelist.** `is_operational_command()` in `main.rs` uses a whitelist pattern — new commands are *not* integrity-checked until explicitly added. This is an intentional security posture: fail-open with an audit trail is preferred over false confidence. + +**`unsafe` code.** Not allowed except for Unix signal handling in proxy mode, which is correctly scoped to `#[cfg(unix)]`. 
From 42d3161872713bc0b20ef49b0714add40c40d5e3 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 19 Apr 2026 13:47:17 +0200 Subject: [PATCH 165/204] fix(discover): exclude_commands bypass for env-prefix, sub cmd + regex Strip env prefix (sudo, VAR=val) before exclude check so PGPASSWORD=x psql matches ["psql"]. Match against full command instead of first token so ["git push"] matches "git push origin main". Compile exclude entries as regex so ["^curl"] works as pattern. Invalid regex falls back to prefix match with eprintln warning. Non-regex patterns auto-anchor with ^ and \b to prevent substring matches (e.g. "go" won't exclude "golangci-lint"). --- src/discover/registry.rs | 84 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 77 insertions(+), 7 deletions(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index ee5f7a7be..cae0e59de 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -447,6 +447,8 @@ pub fn rewrite_command(cmd: &str, excluded: &[String]) -> Option { return None; } + let compiled = compile_exclude_patterns(excluded); + // Simple (non-compound) already-RTK command — return as-is. // For compound commands that start with "rtk" (e.g. "rtk git add . && cargo test"), // fall through to rewrite_compound so the remaining segments get rewritten. @@ -459,11 +461,11 @@ pub fn rewrite_command(cmd: &str, excluded: &[String]) -> Option { return Some(trimmed.to_string()); } - rewrite_compound(trimmed, excluded) + rewrite_compound(trimmed, &compiled) } /// Rewrite a compound command (with `&&`, `||`, `;`, `|`) by rewriting each segment. 
-fn rewrite_compound(cmd: &str, excluded: &[String]) -> Option { +fn rewrite_compound(cmd: &str, excluded: &[ExcludePattern]) -> Option { let tokens = tokenize(cmd); let mut result = String::with_capacity(cmd.len() + 32); let mut any_changed = false; @@ -595,11 +597,46 @@ const SHELL_PREFIX_BUILTINS: &[&str] = &["noglob", "command", "builtin", "exec", const MAX_PREFIX_DEPTH: usize = 10; -fn rewrite_segment(seg: &str, excluded: &[String]) -> Option { +enum ExcludePattern { + Regex(Regex), + Prefix(String), +} + +fn compile_exclude_patterns(patterns: &[String]) -> Vec { + patterns + .iter() + .map(|pattern| { + let anchored = if pattern.starts_with('^') { + pattern.clone() + } else { + format!(r"^{}\b", regex::escape(pattern)) + }; + match Regex::new(&anchored) { + Ok(re) => ExcludePattern::Regex(re), + Err(e) => { + eprintln!( + "rtk: warning: invalid exclude_commands pattern '{}': {}", + pattern, e + ); + ExcludePattern::Prefix(pattern.clone()) + } + } + }) + .collect() +} + +fn rewrite_segment(seg: &str, excluded: &[ExcludePattern]) -> Option { rewrite_segment_inner(seg, excluded, 0) } -fn rewrite_segment_inner(seg: &str, excluded: &[String], depth: usize) -> Option { +fn is_excluded(cmd: &str, excluded: &[ExcludePattern]) -> bool { + excluded.iter().any(|pat| match pat { + ExcludePattern::Regex(re) => re.is_match(cmd), + ExcludePattern::Prefix(prefix) => cmd.starts_with(prefix.as_str()), + }) +} + +fn rewrite_segment_inner(seg: &str, excluded: &[ExcludePattern], depth: usize) -> Option { let trimmed = seg.trim(); if trimmed.is_empty() { return None; @@ -647,9 +684,9 @@ fn rewrite_segment_inner(seg: &str, excluded: &[String], depth: usize) -> Option // Use classify_command for correct ignore/prefix handling let rtk_equivalent = match classify_command(cmd_part) { Classification::Supported { rtk_equivalent, .. 
} => { - // Check if the base command is excluded from rewriting (#243) - let base = cmd_part.split_whitespace().next().unwrap_or(""); - if excluded.iter().any(|e| e == base) { + let stripped = ENV_PREFIX.replace(cmd_part, ""); + let cmd_clean = stripped.trim(); + if is_excluded(cmd_clean, excluded) { return None; } rtk_equivalent @@ -2853,6 +2890,39 @@ mod tests { ); } + #[test] + fn test_exclude_env_prefixed_command() { + let excluded = vec!["psql".to_string()]; + assert_eq!( + rewrite_command("PGPASSWORD=postgres psql -h localhost", &excluded), + None + ); + } + + #[test] + fn test_exclude_subcommand_pattern() { + let excluded = vec!["git push".to_string()]; + assert_eq!(rewrite_command("git push origin main", &excluded), None); + } + + #[test] + fn test_exclude_regex_pattern() { + let excluded = vec!["^curl".to_string()]; + assert_eq!(rewrite_command("curl http://example.com", &excluded), None); + } + + #[test] + fn test_exclude_invalid_regex_fallback() { + let excluded = vec!["curl[".to_string()]; + assert!(rewrite_command("curl http://example.com", &excluded).is_some()); + } + + #[test] + fn test_exclude_does_not_substring_match() { + let excluded = vec!["go".to_string()]; + assert!(rewrite_command("golangci-lint run ./...", &excluded).is_some()); + } + #[test] fn test_all_patterns_are_valid_regex() { use regex::Regex; From 2e401ac38feec88de8d5e46f0301c8a532b95614 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 19 Apr 2026 13:59:03 +0200 Subject: [PATCH 166/204] fix(docs): add missing docs for exclude commands patterns --- docs/guide/getting-started/configuration.md | 13 +++++++++++++ hooks/README.md | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/docs/guide/getting-started/configuration.md b/docs/guide/getting-started/configuration.md index 2c649945e..da7602391 100644 --- a/docs/guide/getting-started/configuration.md +++ b/docs/guide/getting-started/configuration.md @@ -91,6 +91,19 @@ Prevent 
specific commands from being rewritten by the hook: exclude_commands = ["git rebase", "git cherry-pick", "docker exec"] ``` +Patterns match against the full command after stripping env prefixes (`sudo`, `VAR=val`), so `"psql"` excludes both `psql -h localhost` and `PGPASSWORD=x psql -h localhost`. + +Subcommand patterns work too: `"git push"` excludes `git push origin main` but not `git status`. + +Patterns starting with `^` are treated as regex: + +```toml +[hooks] +exclude_commands = ["^curl", "^wget", "git rebase"] +``` + +Invalid regex patterns fall back to prefix matching. + Or for a single invocation: ```bash diff --git a/hooks/README.md b/hooks/README.md index 62875c028..6a6744281 100644 --- a/hooks/README.md +++ b/hooks/README.md @@ -184,7 +184,7 @@ Example: `cargo fmt --all && cargo test` becomes `rtk cargo fmt --all && rtk car ### Override Controls - **`RTK_DISABLED=1`**: Per-command override (`RTK_DISABLED=1 git status` runs raw) -- **`exclude_commands`**: In `~/.config/rtk/config.toml`, list commands to never rewrite +- **`exclude_commands`**: In `~/.config/rtk/config.toml`, list commands to never rewrite. Matches against the full command after stripping env prefixes. Subcommand patterns work (`"git push"` excludes `git push origin main`). Patterns starting with `^` are treated as regex. - **Already-RTK**: `rtk git status` passes through unchanged (no `rtk rtk git`) ## Exit Code Contract From 0ea115bca5fa66daa69fda2f0eeaaf103346b3a4 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 19 Apr 2026 14:35:04 +0200 Subject: [PATCH 167/204] fix(discover): word boundary in exclude_commands Replace \b with ($|\s) to only match end-of-string or whitespace. Empty string and bare "^" patterns produce regexes that match all commands, silently disabling all rewrites on a config typo. Skip trivial patterns with an eprintln warning. 
--- src/discover/registry.rs | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/src/discover/registry.rs b/src/discover/registry.rs index cae0e59de..3e371c09c 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -605,13 +605,21 @@ enum ExcludePattern { fn compile_exclude_patterns(patterns: &[String]) -> Vec { patterns .iter() - .map(|pattern| { - let anchored = if pattern.starts_with('^') { - pattern.clone() + .filter_map(|pattern| { + let trimmed = pattern.trim(); + if trimmed.is_empty() || trimmed == "^" { + eprintln!( + "rtk: warning: ignoring trivial exclude_commands pattern '{}'", + pattern + ); + return None; + } + let anchored = if trimmed.starts_with('^') { + trimmed.to_string() } else { - format!(r"^{}\b", regex::escape(pattern)) + format!(r"^{}($|\s)", regex::escape(trimmed)) }; - match Regex::new(&anchored) { + Some(match Regex::new(&anchored) { Ok(re) => ExcludePattern::Regex(re), Err(e) => { eprintln!( @@ -620,7 +628,7 @@ fn compile_exclude_patterns(patterns: &[String]) -> Vec { ); ExcludePattern::Prefix(pattern.clone()) } - } + }) }) .collect() } @@ -2923,6 +2931,24 @@ mod tests { assert!(rewrite_command("golangci-lint run ./...", &excluded).is_some()); } + #[test] + fn test_exclude_does_not_match_hyphenated_command() { + let excluded = vec!["golangci".to_string()]; + assert!(rewrite_command("golangci-lint run ./...", &excluded).is_some()); + } + + #[test] + fn test_exclude_empty_pattern_ignored() { + let excluded = vec!["".to_string()]; + assert!(rewrite_command("git status", &excluded).is_some()); + } + + #[test] + fn test_exclude_bare_anchor_ignored() { + let excluded = vec!["^".to_string()]; + assert!(rewrite_command("git status", &excluded).is_some()); + } + #[test] fn test_all_patterns_are_valid_regex() { use regex::Regex; From 0e29650e11959730f4c4a2e6d6c0519e14dc8595 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 19 Apr 2026 
15:31:52 +0200 Subject: [PATCH 168/204] fix(hooks): windows use 'rtk hook claude' no fallback Removing old guards, windows can now just use the binary hook engine from 0.37 Related issues: - Fixes #502 : rtk init --global falls back to --claude-md on Windows - Fixes #1353 : Feature request: hook-based mode on Windows - Partially addresses #330 : Add hooks support for Windows - Partially addresses #913 : Persistent "No hook installed" warning on Windows - Partially addresses #1373 : Suppress "No hook installed" warning on Windows - Partially addresses #682 : Config to suppress hook warning - Related to #1248 : Windows PowerShell compatibility gaps Supersedes community PRs: - #1123 fix(init): enable hook installation on Windows - #1027 fix(init): enable hook-based mode on Windows - #809 feat: enable hook-based mode on Windows - #452 feat: add Windows hook support for rtk init --global - #551 feat: native cross-platform hook for Windows support - #150 feat(hook): native cross-platform hook-rewrite command - #1063 Feat/windows hooks --- src/hooks/init.rs | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/src/hooks/init.rs b/src/hooks/init.rs index 65517fc7b..af182dd13 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -872,20 +872,6 @@ fn hook_already_present(root: &serde_json::Value, hook_command: &str) -> bool { } /// Default mode: hook + slim RTK.md + @RTK.md reference -#[cfg(not(unix))] -fn run_default_mode( - _global: bool, - _patch_mode: PatchMode, - _verbose: u8, - _install_opencode: bool, -) -> Result<()> { - eprintln!("[warn] Hook-based mode requires Unix (macOS/Linux)."); - eprintln!(" Windows: use --claude-md mode for full injection."); - eprintln!(" Falling back to --claude-md mode."); - run_claude_md_mode(_global, _verbose, _install_opencode) -} - -#[cfg(unix)] fn run_default_mode( global: bool, patch_mode: PatchMode, @@ -1127,17 +1113,6 @@ fn generate_global_filters_template(verbose: u8) -> Result<()> { } /// Hook-only 
mode: just the hook, no RTK.md -#[cfg(not(unix))] -fn run_hook_only_mode( - _global: bool, - _patch_mode: PatchMode, - _verbose: u8, - _install_opencode: bool, -) -> Result<()> { - anyhow::bail!("Hook install requires Unix (macOS/Linux). Use WSL or --claude-md mode.") -} - -#[cfg(unix)] fn run_hook_only_mode( global: bool, patch_mode: PatchMode, From 115e44853b8cdd2d7af3af2b52c9c31e924a45d3 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 19 Apr 2026 20:17:44 +0200 Subject: [PATCH 169/204] fix(hooks): add regression test for windows native --- src/hooks/init.rs | 141 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/src/hooks/init.rs b/src/hooks/init.rs index af182dd13..e38147179 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -3643,4 +3643,145 @@ More notes assert_eq!(arr.len(), 1); assert_eq!(arr[0]["command"].as_str().unwrap(), CURSOR_HOOK_COMMAND); } + + use std::sync::Mutex; + static HOME_LOCK: Mutex<()> = Mutex::new(()); + + fn with_home_override(tmp: &TempDir, f: F) { + let _guard = HOME_LOCK.lock().unwrap(); + fs::create_dir_all(tmp.path().join(CLAUDE_DIR)).unwrap(); + + let orig_home = std::env::var_os("HOME"); + let orig_profile = std::env::var_os("USERPROFILE"); + std::env::set_var("HOME", tmp.path()); + std::env::set_var("USERPROFILE", tmp.path()); + f(); + match orig_home { + Some(v) => std::env::set_var("HOME", v), + None => std::env::remove_var("HOME"), + } + match orig_profile { + Some(v) => std::env::set_var("USERPROFILE", v), + None => std::env::remove_var("USERPROFILE"), + } + } + + #[test] + fn test_global_default_mode_creates_artifacts() { + let tmp = TempDir::new().unwrap(); + with_home_override(&tmp, || { + run_default_mode(true, PatchMode::Auto, 0, false).unwrap(); + + let claude_dir = tmp.path().join(CLAUDE_DIR); + assert!(claude_dir.join(RTK_MD).exists(), "RTK.md must be created"); + assert!( + claude_dir.join(CLAUDE_MD).exists(), + 
"CLAUDE.md must be created" + ); + + let settings = claude_dir.join(SETTINGS_JSON); + assert!(settings.exists(), "settings.json must be created"); + let content = fs::read_to_string(&settings).unwrap(); + assert!( + content.contains(CLAUDE_HOOK_COMMAND), + "settings.json must contain hook command" + ); + }); + } + + #[test] + fn test_global_uninstall_removes_artifacts() { + let tmp = TempDir::new().unwrap(); + with_home_override(&tmp, || { + run_default_mode(true, PatchMode::Auto, 0, false).unwrap(); + uninstall(true, false, false, false, 0).unwrap(); + + let claude_dir = tmp.path().join(CLAUDE_DIR); + assert!(!claude_dir.join(RTK_MD).exists(), "RTK.md must be removed"); + let settings_content = + fs::read_to_string(claude_dir.join(SETTINGS_JSON)).unwrap_or_default(); + assert!( + !settings_content.contains(CLAUDE_HOOK_COMMAND), + "hook entry must be removed from settings.json" + ); + }); + } + + #[test] + fn test_global_default_mode_idempotent() { + let tmp = TempDir::new().unwrap(); + with_home_override(&tmp, || { + run_default_mode(true, PatchMode::Auto, 0, false).unwrap(); + run_default_mode(true, PatchMode::Auto, 0, false).unwrap(); + + let settings = + fs::read_to_string(tmp.path().join(CLAUDE_DIR).join(SETTINGS_JSON)).unwrap(); + let count = settings.matches(CLAUDE_HOOK_COMMAND).count(); + assert_eq!(count, 1, "hook command must appear exactly once"); + }); + } + + #[test] + fn test_upgrade_from_claude_md_to_hook_mode() { + let tmp = TempDir::new().unwrap(); + with_home_override(&tmp, || { + // Simulate old --claude-md installation + run_claude_md_mode(true, 0, false).unwrap(); + let claude_dir = tmp.path().join(CLAUDE_DIR); + let claude_md_content = fs::read_to_string(claude_dir.join(CLAUDE_MD)).unwrap(); + assert!( + claude_md_content.contains("").unwrap(); + static ref BADGE_LINE_RE: Regex = + Regex::new(r"(?m)^\s*\[!\[[^\]]*\]\([^)]*\)\]\([^)]*\)\s*$").unwrap(); + static ref IMAGE_ONLY_LINE_RE: Regex = 
Regex::new(r"(?m)^\s*!\[[^\]]*\]\([^)]*\)\s*$").unwrap(); + static ref HORIZONTAL_RULE_RE: Regex = + Regex::new(r"(?m)^\s*(?:---+|\*\*\*+|___+)\s*$").unwrap(); + static ref MULTI_BLANK_RE: Regex = Regex::new(r"\n{3,}").unwrap(); + static ref MR_URL_RE: Regex = Regex::new(r"/-/merge_requests/(\d+)").unwrap(); + /// Match GitLab CI section markers: section_start/end:timestamp:name[0K + static ref SECTION_MARKER_RE: Regex = + Regex::new(r"section_(?:start|end):\d+:[a-z0-9_]+(?:\x1b\[0K|\[0K)*").unwrap(); + /// Match bare bracket ANSI-like codes without ESC prefix: [0K, [0;m, [36;1m, etc. + static ref BARE_ANSI_RE: Regex = Regex::new(r"\[[\d;]+[A-Za-z]").unwrap(); +} + +/// Filter markdown body to remove noise while preserving meaningful content. +/// Removes HTML comments, badge lines, image-only lines, horizontal rules, +/// and collapses excessive blank lines. Preserves code blocks untouched. +fn filter_markdown_body(body: &str) -> String { + if body.is_empty() { + return String::new(); + } + + let mut result = String::new(); + let mut remaining = body; + + loop { + let fence_pos = remaining + .find("```") + .or_else(|| remaining.find("~~~")) + .map(|pos| { + let fence = if remaining[pos..].starts_with("```") { + "```" + } else { + "~~~" + }; + (pos, fence) + }); + + match fence_pos { + Some((start, fence)) => { + let before = &remaining[..start]; + result.push_str(&filter_markdown_segment(before)); + + let after_open = start + fence.len(); + let code_start = remaining[after_open..] + .find('\n') + .map(|p| after_open + p + 1) + .unwrap_or(remaining.len()); + + let close_pos = remaining[code_start..] + .find(fence) + .map(|p| code_start + p + fence.len()); + + match close_pos { + Some(end) => { + result.push_str(&remaining[start..end]); + let after_close = remaining[end..] 
+ .find('\n') + .map(|p| end + p + 1) + .unwrap_or(remaining.len()); + result.push_str(&remaining[end..after_close]); + remaining = &remaining[after_close..]; + } + None => { + result.push_str(&remaining[start..]); + remaining = ""; + } + } + } + None => { + result.push_str(&filter_markdown_segment(remaining)); + break; + } + } + } + + result.trim().to_string() +} + +/// Filter a markdown segment that is NOT inside a code block. +fn filter_markdown_segment(text: &str) -> String { + let mut s = HTML_COMMENT_RE.replace_all(text, "").to_string(); + s = BADGE_LINE_RE.replace_all(&s, "").to_string(); + s = IMAGE_ONLY_LINE_RE.replace_all(&s, "").to_string(); + s = HORIZONTAL_RULE_RE.replace_all(&s, "").to_string(); + s = MULTI_BLANK_RE.replace_all(&s, "\n\n").to_string(); + s +} + +/// State icon for MR/issue states (glab uses lowercase). +fn state_icon(state: &str, ultra_compact: bool) -> &'static str { + if ultra_compact { + match state { + "opened" => "O", + "merged" => "M", + "closed" => "C", + _ => "?", + } + } else { + match state { + "opened" => "[open]", + "merged" => "[merged]", + "closed" => "[closed]", + _ => "?", + } + } +} + +/// Pipeline status icon. Non-compact mode uses text tags for parity with +/// `gh_cmd.rs` (avoids multi-byte terminal rendering quirks; aligns with the +/// rest of the codebase). Ultra-compact keeps single-char density. +fn pipeline_icon(status: &str, ultra_compact: bool) -> &'static str { + if ultra_compact { + match status { + "success" => "+", + "failed" => "x", + "canceled" | "cancelled" => "X", + "running" | "pending" => "~", + "skipped" => "-", + _ => "?", + } + } else { + match status { + "success" => "[ok]", + "failed" => "[fail]", + "canceled" | "cancelled" => "[cancel]", + "running" => "[run]", + "pending" => "[pend]", + "skipped" => "[skip]", + _ => "?", + } + } +} + +/// Extract MR number from glab output URL or text. 
+fn extract_mr_number(text: &str) -> Option { + MR_URL_RE + .captures(text) + .and_then(|c| c.get(1)) + .map(|m| m.as_str().to_string()) +} + +/// Extract the first positional identifier (MR/issue number or URL) from args, +/// skipping glab flags that take a value. Returns the identifier and remaining args. +fn extract_identifier_and_extra_args(args: &[String]) -> Option<(String, Vec)> { + if args.is_empty() { + return None; + } + + // Known glab flags that take a value — skip these and their values + let flags_with_value = [ + "-R", + "--repo", + "-g", + "--group", + "-F", + "--output", + "-m", + "--message", + ]; + let mut identifier = None; + let mut extra = Vec::new(); + let mut skip_next = false; + + for arg in args { + if skip_next { + extra.push(arg.clone()); + skip_next = false; + continue; + } + if flags_with_value.contains(&arg.as_str()) { + extra.push(arg.clone()); + skip_next = true; + continue; + } + if arg.starts_with('-') { + extra.push(arg.clone()); + continue; + } + // First non-flag arg is the identifier (number/URL) + if identifier.is_none() { + identifier = Some(arg.clone()); + } else { + extra.push(arg.clone()); + } + } + + identifier.map(|id| (id, extra)) +} + +/// Check if user explicitly requested JSON/custom output format. +/// When present, passthrough to avoid double JSON injection. +fn has_output_flag(args: &[String]) -> bool { + args.iter() + .any(|a| a == "--output" || a == "-F" || a == "--json") +} + +/// Check if view subcommand should passthrough (--web, --comments, etc.). +fn should_passthrough_view(extra_args: &[String]) -> bool { + extra_args + .iter() + .any(|a| a == "--web" || a == "--comments" || a == "--output" || a == "-F") +} + +/// Run a glab command that emits JSON and filter through `filter_fn`. +/// On JSON parse failure (glab returns plain text for empty results), +/// fall back to the raw stdout. 
+fn run_glab_json(cmd: Command, label: &str, filter_fn: F) -> Result +where + F: Fn(&Value) -> String, +{ + runner::run_filtered( + cmd, + "glab", + label, + |stdout| match serde_json::from_str::(stdout) { + Ok(json) => filter_fn(&json), + Err(_) => stdout.to_string(), + }, + RunOptions::stdout_only() + .early_exit_on_failure() + .no_trailing_newline(), + ) +} + +/// Run a glab command with token-optimized output. +pub fn run(subcommand: &str, args: &[String], verbose: u8, ultra_compact: bool) -> Result { + // If the user explicitly requests a specific output format, passthrough unchanged. + if has_output_flag(args) { + return run_passthrough("glab", subcommand, args); + } + + match subcommand { + "mr" => run_mr(args, verbose, ultra_compact), + "issue" => run_issue(args, verbose, ultra_compact), + "ci" | "pipeline" => run_ci(args, verbose, ultra_compact), + "release" => run_release(args, verbose, ultra_compact), + "api" => run_api(args, verbose), + _ => run_passthrough("glab", subcommand, args), + } +} + +// ── MR subcommands ────────────────────────────────────────────────────── + +fn run_mr(args: &[String], verbose: u8, ultra_compact: bool) -> Result { + if args.is_empty() { + return run_passthrough("glab", "mr", args); + } + + match args[0].as_str() { + "list" => mr_list(&args[1..], verbose, ultra_compact), + "view" => mr_view(&args[1..], verbose, ultra_compact), + "create" => mr_create(&args[1..], verbose), + "merge" => mr_action("merge", "merged", &args[1..], verbose), + "approve" => mr_action("approve", "approved", &args[1..], verbose), + "diff" => mr_diff(&args[1..], verbose), + "note" => mr_action("note", "noted", &args[1..], verbose), + "update" => mr_action("update", "updated", &args[1..], verbose), + _ => run_passthrough("glab", "mr", args), + } +} + +/// Format MR list JSON into compact output (pure function, testable). 
+fn format_mr_list(json: &Value, ultra_compact: bool) -> String { + let mrs = match json.as_array() { + Some(arr) => arr, + None => return String::new(), + }; + if mrs.is_empty() { + return if ultra_compact { + "No MRs\n".to_string() + } else { + "No Merge Requests\n".to_string() + }; + } + + let mut filtered = String::new(); + filtered.push_str(if ultra_compact { + "MRs\n" + } else { + "Merge Requests\n" + }); + + for mr in mrs.iter().take(20) { + let iid = mr["iid"].as_i64().unwrap_or(0); + let title = mr["title"].as_str().unwrap_or("???"); + let state = mr["state"].as_str().unwrap_or("???"); + let author = mr["author"]["username"].as_str().unwrap_or("???"); + + let icon = state_icon(state, ultra_compact); + filtered.push_str(&format!( + " {} !{} {} ({})\n", + icon, + iid, + truncate(title, 60), + author + )); + } + + if mrs.len() > 20 { + filtered.push_str(&format!( + " ... {} more (use glab mr list for all)\n", + mrs.len() - 20 + )); + } + + filtered +} + +fn mr_list(args: &[String], _verbose: u8, ultra_compact: bool) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["mr", "list", "-F", "json"]); + for arg in args { + cmd.arg(arg); + } + run_glab_json(cmd, "mr list", |json| format_mr_list(json, ultra_compact)) +} + +/// Format MR view JSON into compact output (pure function, testable). 
+fn format_mr_view(json: &Value, ultra_compact: bool) -> String { + let iid = json["iid"].as_i64().unwrap_or(0); + let title = json["title"].as_str().unwrap_or("???"); + let state = json["state"].as_str().unwrap_or("???"); + let author = json["author"]["username"].as_str().unwrap_or("???"); + let web_url = json["web_url"].as_str().unwrap_or(""); + let merge_status = json["merge_status"].as_str().unwrap_or("unknown"); + let source_branch = json["source_branch"].as_str().unwrap_or("???"); + let target_branch = json["target_branch"].as_str().unwrap_or("???"); + + let icon = state_icon(state, ultra_compact); + + let mut filtered = String::new(); + filtered.push_str(&format!("{} MR !{}: {}\n", icon, iid, title)); + filtered.push_str(&format!(" {}\n", author)); + + let mergeable_str = match merge_status { + "can_be_merged" => "[ok]", + "cannot_be_merged" => "[conflict]", + _ => "[?]", + }; + filtered.push_str(&format!(" {} | {}\n", state, mergeable_str)); + filtered.push_str(&format!(" {} -> {}\n", source_branch, target_branch)); + + if let Some(labels) = json["labels"].as_array() { + let joined: Vec<&str> = labels.iter().filter_map(|v| v.as_str()).collect(); + if !joined.is_empty() { + filtered.push_str(&format!(" Labels: {}\n", joined.join(", "))); + } + } + + if let Some(reviewers) = json["reviewers"].as_array() { + let names: Vec = reviewers + .iter() + .filter_map(|r| r["username"].as_str()) + .map(|u| format!("@{}", u)) + .collect(); + if !names.is_empty() { + filtered.push_str(&format!(" Reviewers: {}\n", names.join(", "))); + } + } + + if let Some(pipeline) = json.get("head_pipeline") { + if !pipeline.is_null() { + let pipeline_status = pipeline["status"].as_str().unwrap_or("unknown"); + let p_icon = pipeline_icon(pipeline_status, ultra_compact); + filtered.push_str(&format!(" Pipeline: {} {}\n", p_icon, pipeline_status)); + } + } + + filtered.push_str(&format!(" {}\n", web_url)); + + if let Some(desc) = json["description"].as_str() { + if !desc.is_empty() { + 
let desc_filtered = filter_markdown_body(desc); + if !desc_filtered.is_empty() { + filtered.push('\n'); + for line in desc_filtered.lines() { + filtered.push_str(&format!(" {}\n", line)); + } + } + } + } + + filtered +} + +fn mr_view(args: &[String], _verbose: u8, ultra_compact: bool) -> Result { + let (mr_number, extra_args) = match extract_identifier_and_extra_args(args) { + Some(pair) => pair, + None => return Err(anyhow::anyhow!("MR number required")), + }; + + // Passthrough for --web, --comments, or explicit output format + if should_passthrough_view(&extra_args) { + return run_passthrough_with_extra("glab", &["mr", "view", &mr_number], &extra_args); + } + + let mut cmd = resolved_command("glab"); + cmd.args(["mr", "view", &mr_number, "-F", "json"]); + for arg in &extra_args { + cmd.arg(arg); + } + run_glab_json(cmd, &format!("mr view {}", mr_number), |json| { + format_mr_view(json, ultra_compact) + }) +} + +fn mr_create(args: &[String], _verbose: u8) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["mr", "create"]); + for arg in args { + cmd.arg(arg); + } + runner::run_filtered( + cmd, + "glab", + "mr create", + |stdout| { + // glab mr create outputs the URL on success + let url = stdout.trim(); + let mr_num = extract_mr_number(url).unwrap_or_default(); + let detail = if !mr_num.is_empty() { + format!("!{} {}", mr_num, url) + } else { + url.to_string() + }; + ok_confirmation("created", &detail) + }, + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +fn mr_diff(args: &[String], _verbose: u8) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["mr", "diff"]); + for arg in args { + cmd.arg(arg); + } + runner::run_filtered( + cmd, + "glab", + "mr diff", + |stdout| { + if stdout.trim().is_empty() { + "No diff\n".to_string() + } else { + git::compact_diff(stdout, 500) + } + }, + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +/// Generic MR action handler for merge/approve/note/update. 
+/// Uses extract_identifier_and_extra_args to correctly find the MR number +/// even when it appears after flags (e.g. `glab mr note -m "msg" 42`). +fn mr_action(subcmd: &str, label: &str, args: &[String], _verbose: u8) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["mr", subcmd]); + for arg in args { + cmd.arg(arg); + } + + let mr_num = extract_identifier_and_extra_args(args) + .map(|(id, _)| format!("!{}", id)) + .unwrap_or_default(); + let label = label.to_string(); + runner::run_filtered( + cmd, + "glab", + &format!("mr {}", subcmd), + move |_stdout| ok_confirmation(&label, &mr_num), + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +// ── Issue subcommands ─────────────────────────────────────────────────── + +fn run_issue(args: &[String], verbose: u8, ultra_compact: bool) -> Result { + if args.is_empty() { + return run_passthrough("glab", "issue", args); + } + + match args[0].as_str() { + "list" => issue_list(&args[1..], verbose, ultra_compact), + "view" => issue_view(&args[1..], verbose), + _ => run_passthrough("glab", "issue", args), + } +} + +/// Format issue list JSON into compact output (pure function, testable). +fn format_issue_list(json: &Value, ultra_compact: bool) -> String { + let issues = match json.as_array() { + Some(arr) => arr, + None => return String::new(), + }; + if issues.is_empty() { + return "No Issues\n".to_string(); + } + + let mut filtered = String::new(); + filtered.push_str("Issues\n"); + + for issue in issues.iter().take(20) { + let iid = issue["iid"].as_i64().unwrap_or(0); + let title = issue["title"].as_str().unwrap_or("???"); + let state = issue["state"].as_str().unwrap_or("???"); + + let icon = if ultra_compact { + if state == "opened" { + "O" + } else { + "C" + } + } else if state == "opened" { + "[open]" + } else { + "[closed]" + }; + filtered.push_str(&format!(" {} #{} {}\n", icon, iid, truncate(title, 60))); + } + + if issues.len() > 20 { + filtered.push_str(&format!(" ... 
{} more\n", issues.len() - 20)); + } + + filtered +} + +fn issue_list(args: &[String], _verbose: u8, ultra_compact: bool) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["issue", "list", "-F", "json"]); + for arg in args { + cmd.arg(arg); + } + run_glab_json(cmd, "issue list", |json| { + format_issue_list(json, ultra_compact) + }) +} + +/// Format issue view JSON into compact output (pure function, testable). +fn format_issue_view(json: &Value) -> String { + let iid = json["iid"].as_i64().unwrap_or(0); + let title = json["title"].as_str().unwrap_or("???"); + let state = json["state"].as_str().unwrap_or("???"); + let author = json["author"]["username"].as_str().unwrap_or("???"); + let web_url = json["web_url"].as_str().unwrap_or(""); + + let icon = if state == "opened" { + "[open]" + } else { + "[closed]" + }; + + let mut filtered = String::new(); + filtered.push_str(&format!("{} Issue #{}: {}\n", icon, iid, title)); + filtered.push_str(&format!(" Author: @{}\n", author)); + filtered.push_str(&format!(" Status: {}\n", state)); + filtered.push_str(&format!(" URL: {}\n", web_url)); + + if let Some(desc) = json["description"].as_str() { + if !desc.is_empty() { + let desc_filtered = filter_markdown_body(desc); + if !desc_filtered.is_empty() { + filtered.push_str("\n Description:\n"); + for line in desc_filtered.lines() { + filtered.push_str(&format!(" {}\n", line)); + } + } + } + } + + filtered +} + +fn issue_view(args: &[String], _verbose: u8) -> Result { + let (issue_number, extra_args) = match extract_identifier_and_extra_args(args) { + Some(pair) => pair, + None => return Err(anyhow::anyhow!("Issue number required")), + }; + + if should_passthrough_view(&extra_args) { + return run_passthrough_with_extra("glab", &["issue", "view", &issue_number], &extra_args); + } + + let mut cmd = resolved_command("glab"); + cmd.args(["issue", "view", &issue_number, "-F", "json"]); + for arg in &extra_args { + cmd.arg(arg); + } + run_glab_json( + cmd, + 
&format!("issue view {}", issue_number), + format_issue_view, + ) +} + +// ── CI/Pipeline subcommands ───────────────────────────────────────────── + +fn run_ci(args: &[String], verbose: u8, ultra_compact: bool) -> Result { + if args.is_empty() { + return run_passthrough("glab", "ci", args); + } + + match args[0].as_str() { + "list" => ci_list(&args[1..], verbose, ultra_compact), + "status" => ci_status(&args[1..], verbose, ultra_compact), + "trace" => ci_trace(&args[1..]), + // "ci view" is an interactive TUI (tcell) — must run with inherited stdio + _ => run_passthrough("glab", "ci", args), + } +} + +/// Format CI list JSON into compact output (pure function, testable). +fn format_ci_list(json: &Value, ultra_compact: bool) -> String { + let pipelines = match json.as_array() { + Some(arr) => arr, + None => return String::new(), + }; + if pipelines.is_empty() { + return "No Pipelines\n".to_string(); + } + + let mut filtered = String::new(); + filtered.push_str("Pipelines\n"); + for pipeline in pipelines.iter().take(10) { + let id = pipeline["id"].as_i64().unwrap_or(0); + let status = pipeline["status"].as_str().unwrap_or("???"); + let ref_name = pipeline["ref"].as_str().unwrap_or("???"); + + let icon = pipeline_icon(status, ultra_compact); + filtered.push_str(&format!(" {} #{} {} ({})\n", icon, id, status, ref_name)); + } + filtered +} + +fn ci_list(args: &[String], _verbose: u8, ultra_compact: bool) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["ci", "list", "-F", "json"]); + for arg in args { + cmd.arg(arg); + } + run_glab_json(cmd, "ci list", |json| format_ci_list(json, ultra_compact)) +} + +/// Format `glab ci status` text output (English keyword parsing, raw fallback). +/// Returns the raw input when no status keyword is recognized on any line +/// (e.g. non-English locale). 
+fn format_ci_status(raw: &str, ultra_compact: bool) -> String { + let mut filtered = String::new(); + let mut any_keyword_matched = false; + for line in raw.lines() { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + let icon = if trimmed.contains("passed") || trimmed.contains("success") { + pipeline_icon("success", ultra_compact) + } else if trimmed.contains("failed") { + pipeline_icon("failed", ultra_compact) + } else if trimmed.contains("running") { + pipeline_icon("running", ultra_compact) + } else if trimmed.contains("pending") { + pipeline_icon("pending", ultra_compact) + } else if trimmed.contains("canceled") || trimmed.contains("cancelled") { + pipeline_icon("canceled", ultra_compact) + } else { + "" + }; + + if !icon.is_empty() { + any_keyword_matched = true; + filtered.push_str(&format!("{} {}\n", icon, trimmed)); + } else { + filtered.push_str(&format!(" {}\n", trimmed)); + } + } + + if !any_keyword_matched { + // Non-English locale or unrecognized format — preserve raw output verbatim. + raw.to_string() + } else { + filtered + } +} + +fn ci_status(args: &[String], _verbose: u8, ultra_compact: bool) -> Result { + // glab ci status does not support -F json — text parsing with raw fallback + let mut cmd = resolved_command("glab"); + cmd.args(["ci", "status"]); + for arg in args { + cmd.arg(arg); + } + runner::run_filtered( + cmd, + "glab", + "ci status", + |stdout| format_ci_status(stdout, ultra_compact), + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +fn ci_trace(args: &[String]) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["ci", "trace"]); + for arg in args { + cmd.arg(arg); + } + runner::run_filtered( + cmd, + "glab", + "ci trace", + filter_ci_trace, + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +/// Filter CI job trace output: strip ANSI codes, section markers, and runner +/// boilerplate. Keep warnings, errors, and build output. 
+fn filter_ci_trace(raw: &str) -> String { + let cleaned = strip_ansi(raw); + let cleaned = BARE_ANSI_RE.replace_all(&cleaned, ""); + let cleaned = SECTION_MARKER_RE.replace_all(&cleaned, ""); + + let mut filtered = String::new(); + + for line in cleaned.lines() { + let trimmed = line.trim(); + + if trimmed.is_empty() { + continue; + } + + // Skip runner boilerplate + if trimmed.starts_with("Running with gitlab-runner") + || (trimmed.starts_with("on ") && trimmed.contains("system ID:")) + || trimmed.starts_with("Using Docker executor") + || trimmed.starts_with("Using Shell") + || trimmed.starts_with("Running on runner-") + || trimmed.starts_with("Running on ") + || trimmed.starts_with("Preparing the") + || trimmed.starts_with("Preparing environment") + || trimmed.starts_with("Getting source from") + || trimmed.starts_with("Resolving secrets") + || trimmed.starts_with("Cleaning up") + || trimmed.starts_with("Uploading artifacts") + || trimmed.starts_with("Downloading artifacts") + || trimmed.starts_with("Runtime platform") + { + continue; + } + + // Skip git fetch / checkout boilerplate + if trimmed.starts_with("Fetching changes with git") + || trimmed.starts_with("Initialized empty Git") + || trimmed.starts_with("Created fresh repository") + || trimmed.starts_with("Checking out ") + || trimmed.starts_with("Skipping Git submodules") + { + continue; + } + + filtered.push_str(trimmed); + filtered.push('\n'); + } + + filtered +} + +// ── Release subcommands ────────────────────────────────────────────────── + +fn run_release(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result { + if args.is_empty() { + return run_passthrough("glab", "release", args); + } + + match args[0].as_str() { + "list" => release_list(&args[1..]), + "view" => release_view(&args[1..]), + _ => run_passthrough("glab", "release", args), + } +} + +/// Format `glab release list` tab-separated output into compact form. +/// Input format: "Name\tTag\tCreated\n" header + data rows. 
+fn format_release_list(raw: &str) -> Option { + let mut lines = raw.lines().peekable(); + let mut filtered = String::new(); + + // Skip "Showing N releases..." preamble and blank lines + while let Some(line) = lines.peek() { + let trimmed = line.trim(); + if trimmed.starts_with("Name\t") || trimmed.starts_with("NAME\t") { + lines.next(); // consume header + break; + } + lines.next(); + } + + filtered.push_str("Releases\n"); + + let mut count = 0; + for line in lines { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + let parts: Vec<&str> = trimmed.split('\t').collect(); + if parts.len() < 3 { + continue; + } + + let name = parts[0].trim(); + let tag = parts[1].trim(); + let created = parts[2].trim(); + + if name == tag { + filtered.push_str(&format!(" {} ({})\n", name, created)); + } else { + filtered.push_str(&format!(" {} [{}] ({})\n", name, tag, created)); + } + + count += 1; + if count >= 20 { + break; + } + } + + if count == 0 { + return None; + } + + Some(filtered) +} + +fn release_list(args: &[String]) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["release", "list"]); + for arg in args { + cmd.arg(arg); + } + runner::run_filtered( + cmd, + "glab", + "release list", + |stdout| format_release_list(stdout).unwrap_or_else(|| stdout.to_string()), + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +fn release_view(args: &[String]) -> Result { + let mut cmd = resolved_command("glab"); + cmd.args(["release", "view"]); + for arg in args { + cmd.arg(arg); + } + runner::run_filtered( + cmd, + "glab", + "release view", + filter_release_view, + RunOptions::stdout_only().early_exit_on_failure(), + ) +} + +/// Filter release view output: strip SOURCES block, image lines, HTML comments, +/// horizontal rules, and collapse blank lines. 
+fn filter_release_view(raw: &str) -> String { + let mut filtered = String::new(); + let mut in_sources = false; + + for line in raw.lines() { + let trimmed = line.trim(); + + // Skip SOURCES section (archive download URLs) + if trimmed == "SOURCES" { + in_sources = true; + continue; + } + if in_sources { + if trimmed.starts_with("http://") || trimmed.starts_with("https://") { + continue; + } + in_sources = false; + } + + // Strip image-only lines + if trimmed.starts_with("![") && trimmed.ends_with(')') && trimmed.contains("](") { + continue; + } + // Strip glab's "Image: name → url" rendering + if trimmed.starts_with("Image:") && trimmed.contains('→') { + continue; + } + + // Strip HTML comments + if trimmed.starts_with("") { + continue; + } + + // Strip horizontal rules (--- rendered as --------) + if trimmed.chars().all(|c| c == '-') && trimmed.len() >= 3 { + continue; + } + + filtered.push_str(line); + filtered.push('\n'); + } + + // Collapse multiple blank lines + MULTI_BLANK_RE.replace_all(&filtered, "\n\n").to_string() +} + +// ── API subcommand ────────────────────────────────────────────────────── + +fn run_api(args: &[String], _verbose: u8) -> Result { + // glab api is an explicit/advanced command — the user knows what they asked for. + // Converting JSON to a schema destroys all values and forces Claude to re-fetch. + // Passthrough preserves the full response and tracks metrics at 0% savings. 
+ run_passthrough("glab", "api", args) +} + +// ── Passthrough ───────────────────────────────────────────────────────── + +fn run_passthrough(cmd: &str, subcommand: &str, args: &[String]) -> Result { + let mut os_args: Vec = vec![std::ffi::OsString::from(subcommand)]; + os_args.extend(args.iter().map(std::ffi::OsString::from)); + runner::run_passthrough(cmd, &os_args, 0) +} + +fn run_passthrough_with_extra(cmd: &str, base_args: &[&str], extra_args: &[String]) -> Result { + let mut os_args: Vec = + base_args.iter().map(std::ffi::OsString::from).collect(); + os_args.extend(extra_args.iter().map(std::ffi::OsString::from)); + runner::run_passthrough(cmd, &os_args, 0) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_state_icon_opened() { + assert_eq!(state_icon("opened", false), "[open]"); + assert_eq!(state_icon("opened", true), "O"); + } + + #[test] + fn test_state_icon_merged() { + assert_eq!(state_icon("merged", false), "[merged]"); + assert_eq!(state_icon("merged", true), "M"); + } + + #[test] + fn test_state_icon_closed() { + assert_eq!(state_icon("closed", false), "[closed]"); + assert_eq!(state_icon("closed", true), "C"); + } + + #[test] + fn test_pipeline_icon_success() { + assert_eq!(pipeline_icon("success", false), "[ok]"); + assert_eq!(pipeline_icon("success", true), "+"); + } + + #[test] + fn test_pipeline_icon_failed() { + assert_eq!(pipeline_icon("failed", false), "[fail]"); + assert_eq!(pipeline_icon("failed", true), "x"); + } + + #[test] + fn test_pipeline_icon_running() { + assert_eq!(pipeline_icon("running", false), "[run]"); + assert_eq!(pipeline_icon("running", true), "~"); + } + + #[test] + fn test_extract_mr_number_from_url() { + let url = "https://gitlab.example.com/group/project/-/merge_requests/42"; + assert_eq!(extract_mr_number(url), Some("42".to_string())); + } + + #[test] + fn test_extract_mr_number_no_match() { + assert_eq!(extract_mr_number("not a url"), None); + } + + #[test] + fn test_filter_markdown_body_empty() { 
+ assert_eq!(filter_markdown_body(""), ""); + } + + #[test] + fn test_filter_markdown_body_html_comments() { + let input = "Hello\n\nWorld"; + let result = filter_markdown_body(input); + assert!(!result.contains("\n```\nAfter"; + let result = filter_markdown_body(input); + assert!(result.contains("")); + assert!(result.contains("Text")); + assert!(result.contains("After")); + } + + #[test] + fn test_filter_markdown_body_blank_lines_collapse() { + let input = "Line 1\n\n\n\n\nLine 2"; + let result = filter_markdown_body(input); + assert!(!result.contains("\n\n\n")); + assert!(result.contains("Line 1")); + assert!(result.contains("Line 2")); + } + + #[test] + fn test_filter_markdown_body_badges_removed() { + let input = + "# Title\n[![CI](https://img.shields.io/badge.svg)](https://github.com/actions)\nText"; + let result = filter_markdown_body(input); + assert!(!result.contains("shields.io")); + assert!(result.contains("# Title")); + assert!(result.contains("Text")); + } + + #[test] + fn test_filter_markdown_body_meaningful_content_preserved() { + let input = "## Summary\n- Item 1\n- Item 2\n\n[Link](https://example.com)"; + let result = filter_markdown_body(input); + assert!(result.contains("## Summary")); + assert!(result.contains("- Item 1")); + assert!(result.contains("[Link](https://example.com)")); + } + + #[test] + fn test_ok_confirmation_mr_create() { + let result = ok_confirmation( + "created", + "!42 https://gitlab.example.com/-/merge_requests/42", + ); + assert!(result.contains("ok created")); + assert!(result.contains("!42")); + } + + #[test] + fn test_ok_confirmation_mr_merge() { + let result = ok_confirmation("merged", "!42"); + assert_eq!(result, "ok merged !42"); + } + + #[test] + fn test_ok_confirmation_mr_approve() { + let result = ok_confirmation("approved", "!42"); + assert_eq!(result, "ok approved !42"); + } + + fn count_tokens(text: &str) -> usize { + text.split_whitespace().count() + } + + fn parse_fixture(raw: &str) -> Value { + 
serde_json::from_str(raw).expect("valid JSON fixture") + } + + #[test] + fn test_mr_list_token_savings() { + let input = include_str!("../../../tests/fixtures/glab_mr_list_raw.json"); + let output = format_mr_list(&parse_fixture(input), false); + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + assert!( + savings >= 60.0, + "MR list: expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens + ); + } + + #[test] + fn test_mr_list_format() { + let input = include_str!("../../../tests/fixtures/glab_mr_list_raw.json"); + let output = format_mr_list(&parse_fixture(input), false); + assert!(output.contains("Merge Requests")); + assert!(output.contains("!314")); + assert!(output.contains("[open]")); // opened + assert!(output.contains("[merged]")); // merged + assert!(output.contains("[closed]")); // closed + } + + #[test] + fn test_mr_list_ultra_compact() { + let input = include_str!("../../../tests/fixtures/glab_mr_list_raw.json"); + let output = format_mr_list(&parse_fixture(input), true); + assert!(output.starts_with("MRs\n")); + assert!(output.contains("O ")); // opened + assert!(output.contains("M ")); // merged + assert!(output.contains("C ")); // closed + } + + #[test] + fn test_issue_list_token_savings() { + let input = include_str!("../../../tests/fixtures/glab_issue_list_raw.json"); + let output = format_issue_list(&parse_fixture(input), false); + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + assert!( + savings >= 60.0, + "Issue list: expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens + ); + } + + #[test] + fn test_issue_list_format() { + let input = include_str!("../../../tests/fixtures/glab_issue_list_raw.json"); + let output = 
format_issue_list(&parse_fixture(input), false); + assert!(output.contains("Issues")); + assert!(output.contains("#156")); + assert!(output.contains("[open]")); // opened + assert!(output.contains("[closed]")); // closed + } + + #[test] + fn test_format_mr_list_non_array_returns_empty() { + // Non-array JSON (e.g. error object) returns empty — run_glab_json then + // falls back to raw stdout through its JSON parse branch. + let output = format_mr_list(&Value::Object(Default::default()), false); + assert!(output.is_empty()); + } + + #[test] + fn test_format_issue_list_non_array_returns_empty() { + let output = format_issue_list(&Value::Object(Default::default()), false); + assert!(output.is_empty()); + } + + #[test] + fn test_extract_identifier_simple() { + let args: Vec = vec!["42".into()]; + let (id, extra) = extract_identifier_and_extra_args(&args).unwrap(); + assert_eq!(id, "42"); + assert!(extra.is_empty()); + } + + #[test] + fn test_extract_identifier_with_repo_flag_before() { + // glab mr view -R group/project 42 + let args: Vec = vec!["-R".into(), "group/project".into(), "42".into()]; + let (id, extra) = extract_identifier_and_extra_args(&args).unwrap(); + assert_eq!(id, "42"); + assert_eq!(extra, vec!["-R", "group/project"]); + } + + #[test] + fn test_extract_identifier_with_repo_flag_after() { + // glab mr view 42 -R group/project + let args: Vec = vec!["42".into(), "-R".into(), "group/project".into()]; + let (id, extra) = extract_identifier_and_extra_args(&args).unwrap(); + assert_eq!(id, "42"); + assert_eq!(extra, vec!["-R", "group/project"]); + } + + #[test] + fn test_extract_identifier_with_group_flag() { + let args: Vec = vec!["-g".into(), "mygroup".into(), "7".into()]; + let (id, extra) = extract_identifier_and_extra_args(&args).unwrap(); + assert_eq!(id, "7"); + assert_eq!(extra, vec!["-g", "mygroup"]); + } + + #[test] + fn test_extract_identifier_empty() { + let args: Vec = vec![]; + assert!(extract_identifier_and_extra_args(&args).is_none()); + } 
+ + #[test] + fn test_extract_identifier_only_flags() { + let args: Vec = vec!["-R".into(), "group/project".into()]; + assert!(extract_identifier_and_extra_args(&args).is_none()); + } + + // ── has_output_flag tests ─────────────────────────────────────────── + + #[test] + fn test_has_output_flag_json() { + assert!(has_output_flag(&["--json".into()])); + } + + #[test] + fn test_has_output_flag_format() { + assert!(has_output_flag(&["-F".into(), "json".into()])); + assert!(has_output_flag(&["--output".into(), "text".into()])); + } + + #[test] + fn test_has_output_flag_none() { + assert!(!has_output_flag(&["mr".into(), "list".into()])); + } + + // ── should_passthrough_view tests ─────────────────────────────────── + + #[test] + fn test_should_passthrough_view_web() { + assert!(should_passthrough_view(&["--web".into()])); + } + + #[test] + fn test_should_passthrough_view_comments() { + assert!(should_passthrough_view(&["--comments".into()])); + } + + #[test] + fn test_should_passthrough_view_output() { + assert!(should_passthrough_view(&["-F".into(), "json".into()])); + } + + #[test] + fn test_should_passthrough_view_default() { + assert!(!should_passthrough_view(&[])); + } + + // ── mr_action identifier extraction ───────────────────────────────── + + #[test] + fn test_extract_identifier_with_message_flag() { + // glab mr note -m "comment" 42 — number should be 42, not "comment" + let args: Vec = vec!["-m".into(), "comment".into(), "42".into()]; + let (id, extra) = extract_identifier_and_extra_args(&args).unwrap(); + assert_eq!(id, "42"); + assert_eq!(extra, vec!["-m", "comment"]); + } + + // ── release list tests ────────────────────────────────────────────── + + #[test] + fn test_format_release_list() { + let input = include_str!("../../../tests/fixtures/glab_release_list_raw.txt"); + let output = format_release_list(input).expect("should parse release list"); + assert!(output.starts_with("Releases\n")); + assert!(output.contains("v3.2.1")); + 
assert!(output.contains("about 2 days ago")); + } + + #[test] + fn test_format_release_list_token_savings() { + let input = include_str!("../../../tests/fixtures/glab_release_list_raw.txt"); + let output = format_release_list(input).expect("should parse release list"); + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + // Release list text is already compact (tab-separated); savings are modest. + assert!( + savings >= 20.0, + "Release list: expected >=20% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens + ); + } + + #[test] + fn test_format_release_list_empty() { + let input = "No releases available on owner/repo.\nName\tTag\tCreated\n"; + assert!(format_release_list(input).is_none()); + } + + #[test] + fn test_format_release_list_name_differs_from_tag() { + let input = "Showing 1 releases\n\nName\tTag\tCreated\nMy Release\tv1.0.0\t2 days ago\n"; + let output = format_release_list(input).expect("should parse"); + assert!(output.contains("My Release [v1.0.0]")); + } + + // ── ci trace tests ────────────────────────────────────────────────── + + #[test] + fn test_filter_ci_trace_strips_boilerplate() { + let input = include_str!("../../../tests/fixtures/glab_ci_trace_raw.txt"); + let output = filter_ci_trace(input); + // Runner boilerplate stripped + assert!(!output.contains("Running with gitlab-runner")); + assert!(!output.contains("Using Docker executor")); + assert!(!output.contains("Fetching changes with git")); + assert!(!output.contains("Checking out")); + assert!(!output.contains("Uploading artifacts")); + // Build output preserved + assert!(output.contains("npm ci")); + assert!(output.contains("npm run build")); + assert!(output.contains("npm test")); + // Test results preserved + assert!(output.contains("FAIL")); + assert!(output.contains("AssertionError")); + // Final error line preserved + 
assert!(output.contains("Job failed")); + } + + #[test] + fn test_filter_ci_trace_token_savings() { + let input = include_str!("../../../tests/fixtures/glab_ci_trace_raw.txt"); + let output = filter_ci_trace(input); + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + // CI trace preserves build output; savings come from stripping boilerplate. + assert!( + savings >= 30.0, + "CI trace: expected >=30% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens + ); + } + + // ── release view tests ────────────────────────────────────────────── + + #[test] + fn test_filter_release_view_strips_sources() { + let input = include_str!("../../../tests/fixtures/glab_release_view_raw.txt"); + let output = filter_release_view(input); + // SOURCES section stripped + assert!(!output.contains("SOURCES")); + assert!(!output.contains("toolkit-v2.0.0.zip")); + assert!(!output.contains("toolkit-v2.0.0.tar.gz")); + // Content preserved + assert!(output.contains("Test Release v2.0")); + assert!(output.contains("Added widget support")); + assert!(output.contains("@alice_dev @bob_dev")); + // Noise stripped + assert!(!output.contains("--------")); + assert!(!output.contains("Image:")); + assert!(!output.contains("\n\n### Acceptance Criteria\n- [ ] `rtk glab ci list` shows compact pipeline summary\n- [ ] `rtk glab ci status` shows current pipeline status\n- [ ] Token savings >= 80%\n\n---\n\n[![status](https://img.shields.io/badge/status-in_progress-yellow)](https://example.com)\n" + }, + { + "iid": 150, + "title": "rtk cargo test shows full output when no failures", + "state": "opened", + "author": {"username": "bob_report", "name": "Bob Reporter", "id": 100}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/150", + "created_at": "2026-02-28T08:00:00Z", + "updated_at": "2026-03-02T16:00:00Z", + "labels": ["bug", "cargo"], + "assignees": 
[{"username": "dave_fix"}], + "description": "When all tests pass, `rtk cargo test` still shows verbose compilation output instead of just the summary line.\n\n### Steps to Reproduce\n1. Run `rtk cargo test` in a project with all passing tests\n2. Observe that compiler output is included\n\n### Expected\nOnly show test summary when all tests pass.\n\n### Actual\nFull compiler warnings and test output shown." + }, + { + "iid": 145, + "title": "Add Helm CLI support", + "state": "opened", + "author": {"username": "carol_infra", "name": "Carol Infra", "id": 200}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/145", + "created_at": "2026-02-25T12:00:00Z", + "updated_at": "2026-03-04T09:00:00Z", + "labels": ["enhancement", "infra"], + "assignees": [], + "description": "Helm CLI outputs are verbose. Would be great to have RTK support for:\n- `helm list` (compact table)\n- `helm status` (summary only)\n- `helm install/upgrade` (ok confirmation)\n\nSimilar to how `rtk kubectl` works." 
+ }, + { + "iid": 140, + "title": "Binary size increased 30% after Python/Go modules", + "state": "opened", + "author": {"username": "eve_perf", "name": "Eve Performance", "id": 300}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/140", + "created_at": "2026-02-20T15:00:00Z", + "updated_at": "2026-02-22T10:00:00Z", + "labels": ["performance", "build"], + "assignees": [{"username": "frank_contrib"}], + "description": "After merging Python and Go support, stripped release binary went from 3.2MB to 4.1MB.\n\nInvestigate if we can:\n- Use feature flags to make modules optional\n- Reduce regex count (share patterns across modules)\n- Review serde usage (maybe avoid full JSON parsing for simple cases)" + }, + { + "iid": 135, + "title": "rtk gain --history shows wrong dates on macOS", + "state": "closed", + "author": {"username": "george_mac", "name": "George Mac", "id": 400}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/135", + "created_at": "2026-02-15T09:00:00Z", + "updated_at": "2026-02-18T11:00:00Z", + "labels": ["bug", "macos"], + "assignees": [{"username": "alice_dev"}], + "description": "On macOS, `rtk gain --history` shows dates in UTC instead of local timezone.\n\nFixed in v0.23.1." + }, + { + "iid": 130, + "title": "Support TOML-based filter DSL", + "state": "opened", + "author": {"username": "heidi_arch", "name": "Heidi Architect", "id": 500}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/130", + "created_at": "2026-02-10T08:00:00Z", + "updated_at": "2026-02-12T16:00:00Z", + "labels": ["enhancement", "architecture"], + "assignees": [], + "description": "Instead of writing Rust code for each new filter, allow users to define filters in TOML.\n\n```toml\n[[filter]]\ncommand = \"terraform plan\"\npattern = \"^(Plan|Apply|Error):\"\nformat = \"compact\"\n```\n\nThis would make RTK extensible without recompilation." 
+ }, + { + "iid": 125, + "title": "Improve error messages for missing commands", + "state": "closed", + "author": {"username": "ivan_docs", "name": "Ivan Writer", "id": 600}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/125", + "created_at": "2026-02-05T14:00:00Z", + "updated_at": "2026-02-06T09:00:00Z", + "labels": ["enhancement", "ux"], + "assignees": [{"username": "ivan_docs"}], + "description": "When the underlying command is not installed (e.g., `rtk glab mr list` without glab), the error message is confusing:\n\n```\nError: Failed to run glab mr list\n```\n\nShould say something like:\n```\nError: glab not found. Install it: https://gitlab.com/gitlab-org/cli\n```" + }, + { + "iid": 120, + "title": "Add rtk completion command for shell completions", + "state": "opened", + "author": {"username": "judy_shell", "name": "Judy Shell", "id": 700}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/120", + "created_at": "2026-02-01T11:00:00Z", + "updated_at": "2026-02-03T15:00:00Z", + "labels": ["enhancement", "shell"], + "assignees": [], + "description": "Clap supports generating shell completions via `clap_complete`. Add a `rtk completion bash/zsh/fish` command.\n\nThis would help discoverability of available commands." + }, + { + "iid": 115, + "title": "rtk read crashes on binary files", + "state": "closed", + "author": {"username": "karl_refactor", "name": "Karl Refactorer", "id": 800}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/115", + "created_at": "2026-01-28T10:00:00Z", + "updated_at": "2026-01-30T12:00:00Z", + "labels": ["bug", "crash"], + "assignees": [{"username": "dave_fix"}], + "description": "Running `rtk read /path/to/binary.exe` panics with:\n```\nthread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: Utf8Error'\n```\n\nShould detect binary files and skip filtering." 
+ }, + { + "iid": 110, + "title": "Track savings per project directory", + "state": "opened", + "author": {"username": "lisa_feat", "name": "Lisa Feature", "id": 900}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/issues/110", + "created_at": "2026-01-25T09:00:00Z", + "updated_at": "2026-01-27T14:00:00Z", + "labels": ["enhancement", "analytics"], + "assignees": [], + "description": "Currently `rtk gain` shows global stats. It would be useful to see savings broken down by project directory.\n\nProposal: store `cwd` in the tracking database and add `rtk gain --by-project` flag." + } +] diff --git a/tests/fixtures/glab_mr_list_raw.json b/tests/fixtures/glab_mr_list_raw.json new file mode 100644 index 000000000..c502b62a7 --- /dev/null +++ b/tests/fixtures/glab_mr_list_raw.json @@ -0,0 +1,182 @@ +[ + { + "iid": 314, + "title": "feat(glab): add GitLab CLI (glab) command support", + "state": "opened", + "author": {"username": "alice_dev", "name": "Alice Developer", "id": 42}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/314", + "created_at": "2026-03-01T10:00:00Z", + "updated_at": "2026-03-05T14:30:00Z", + "source_branch": "feat/glab-support", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["enhancement", "cli"], + "assignees": [{"username": "alice_dev", "name": "Alice Developer"}], + "reviewers": [{"username": "bob_review"}, {"username": "carol_review"}], + "description": "## Summary\n\nAdd GitLab CLI support.\n\n\n\n## Changes\n- New module\n- MR/issue/CI filtering\n- Token savings 80-87%\n\n---\n\n[![CI](https://img.shields.io/badge/CI-passing-green)](https://ci.example.com)\n", + "head_pipeline": {"id": 98765, "status": "success", "ref": "feat/glab-support"} + }, + { + "iid": 310, + "title": "fix(git): handle merge commits in compact diff", + "state": "merged", + "author": {"username": "dave_fix", "name": "Dave Fixer", "id": 100}, + "web_url": 
"https://gitlab.example.com/acme/toolkit/-/merge_requests/310", + "created_at": "2026-02-28T08:00:00Z", + "updated_at": "2026-03-02T16:00:00Z", + "source_branch": "fix/merge-commits", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["bug", "git"], + "assignees": [{"username": "dave_fix"}], + "reviewers": [{"username": "eve_review"}], + "description": "Fix handling of merge commits in `compact_diff`. Previously, merge commits were being skipped entirely which lost context.\n\n### Test Plan\n- [x] Unit tests added\n- [x] Manual verification with merge-heavy repos\n", + "head_pipeline": {"id": 98700, "status": "success", "ref": "fix/merge-commits"} + }, + { + "iid": 305, + "title": "feat(aws): add AWS CLI module with token-optimized output", + "state": "opened", + "author": {"username": "frank_contrib", "name": "Frank Contributor", "id": 200}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/305", + "created_at": "2026-02-25T12:00:00Z", + "updated_at": "2026-03-04T09:00:00Z", + "source_branch": "feat/aws-cli", + "target_branch": "master", + "merge_status": "cannot_be_merged", + "draft": true, + "labels": ["enhancement", "infra"], + "assignees": [], + "reviewers": [{"username": "grace_review"}, {"username": "heidi_review"}], + "description": "Add AWS CLI support.\n\n![architecture](https://example.com/arch.png)\n\n## Commands\n- `rtk aws s3 ls`\n- `rtk aws ec2 describe-instances`\n- `rtk aws ecs list-services`\n\n## Token Savings\n| Command | Savings |\n|---------|--------|\n| s3 ls | 75% |\n| ec2 describe | 85% |\n| ecs list | 80% |\n", + "head_pipeline": {"id": 98650, "status": "failed", "ref": "feat/aws-cli"} + }, + { + "iid": 302, + "title": "chore(master): release 0.24.0", + "state": "merged", + "author": {"username": "release-bot", "name": "Release Bot", "id": 1}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/302", + "created_at": "2026-02-20T00:00:00Z", + "updated_at": 
"2026-02-20T01:00:00Z", + "source_branch": "release-please--branches--master", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["release"], + "assignees": [], + "reviewers": [], + "description": "## [0.24.0](https://example.com/compare/v0.23.0...v0.24.0)\n\n### Features\n* feat(aws): add AWS CLI module\n* feat(psql): add PostgreSQL module\n\n### Bug Fixes\n* fix(playwright): fix JSON parser\n", + "head_pipeline": {"id": 98600, "status": "success", "ref": "release-please--branches--master"} + }, + { + "iid": 298, + "title": "docs: update README with Python and Go command examples", + "state": "merged", + "author": {"username": "ivan_docs", "name": "Ivan Writer", "id": 300}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/298", + "created_at": "2026-02-18T15:00:00Z", + "updated_at": "2026-02-19T10:00:00Z", + "source_branch": "docs/python-go-examples", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["documentation"], + "assignees": [{"username": "ivan_docs"}], + "reviewers": [{"username": "judy_review"}], + "description": "Update README.md with comprehensive examples for:\n- Python commands (ruff, pytest, pip)\n- Go commands (go test, go build, golangci-lint)\n\nAll examples tested manually.", + "head_pipeline": null + }, + { + "iid": 295, + "title": "refactor: extract parser module from runner.rs", + "state": "closed", + "author": {"username": "karl_refactor", "name": "Karl Refactorer", "id": 400}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/295", + "created_at": "2026-02-15T09:00:00Z", + "updated_at": "2026-02-16T11:00:00Z", + "source_branch": "refactor/parser-module", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["refactor"], + "assignees": [{"username": "karl_refactor"}], + "reviewers": [], + "description": "Extract parser logic from runner.rs into dedicated parser/ 
module.\n\n---\n\nThis was superseded by #300 which took a different approach.\n\n***\n", + "head_pipeline": {"id": 98500, "status": "canceled", "ref": "refactor/parser-module"} + }, + { + "iid": 290, + "title": "feat(tee): save raw output on failure for LLM re-read", + "state": "merged", + "author": {"username": "lisa_feat", "name": "Lisa Feature", "id": 500}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/290", + "created_at": "2026-02-10T08:00:00Z", + "updated_at": "2026-02-12T16:00:00Z", + "source_branch": "feat/tee-output", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["enhancement"], + "assignees": [{"username": "lisa_feat"}], + "reviewers": [{"username": "mike_review"}], + "description": "## Tee Output Recovery\n\nSave raw unfiltered output on command failure.\nPrint one-line hint so LLMs can re-read instead of re-run.\n\n### Configuration\n```toml\n[tee]\nenabled = true\ndir = \"~/.local/share/rtk/tee\"\nmax_files = 20\nmax_size = 1048576\n```\n", + "head_pipeline": {"id": 98400, "status": "success", "ref": "feat/tee-output"} + }, + { + "iid": 285, + "title": "ci: add ARM64 Linux build to release workflow", + "state": "merged", + "author": {"username": "nancy_ci", "name": "Nancy CI", "id": 600}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/285", + "created_at": "2026-02-05T14:00:00Z", + "updated_at": "2026-02-06T09:00:00Z", + "source_branch": "ci/arm64-build", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["ci"], + "assignees": [{"username": "nancy_ci"}], + "reviewers": [{"username": "oscar_review"}], + "description": "Add ARM64 Linux target to the release workflow.\n\n- Uses `cross` for cross-compilation\n- Generates `.deb` and `.rpm` packages\n- Tested on Raspberry Pi 4 and AWS Graviton", + "head_pipeline": {"id": 98300, "status": "success", "ref": "ci/arm64-build"} + }, + { + "iid": 280, + "title": 
"fix(vitest): handle watch mode output gracefully", + "state": "opened", + "author": {"username": "peter_bugfix", "name": "Peter Bugfix", "id": 700}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/280", + "created_at": "2026-02-01T11:00:00Z", + "updated_at": "2026-02-03T15:00:00Z", + "source_branch": "fix/vitest-watch", + "target_branch": "master", + "merge_status": "unchecked", + "draft": false, + "labels": ["bug", "vitest"], + "assignees": [{"username": "peter_bugfix"}], + "reviewers": [], + "description": "When vitest runs in watch mode, output is continuous and doesn't have a clear end marker. This fix detects watch mode and falls back to passthrough.\n\n\n", + "head_pipeline": {"id": 98200, "status": "running", "ref": "fix/vitest-watch"} + }, + { + "iid": 275, + "title": "feat(discover): add rtk discover command for missed savings analysis", + "state": "merged", + "author": {"username": "quinn_dev", "name": "Quinn Developer", "id": 800}, + "web_url": "https://gitlab.example.com/acme/toolkit/-/merge_requests/275", + "created_at": "2026-01-28T10:00:00Z", + "updated_at": "2026-01-30T12:00:00Z", + "source_branch": "feat/discover", + "target_branch": "master", + "merge_status": "can_be_merged", + "draft": false, + "labels": ["enhancement", "analytics"], + "assignees": [{"username": "quinn_dev"}], + "reviewers": [{"username": "rachel_review"}, {"username": "sam_review"}], + "description": "Add `rtk discover` command that scans Claude Code JSONL sessions and reports missed savings opportunities.\n\n## Features\n- Classifies commands as Supported/Unsupported/Ignored\n- Groups by category with estimated token savings\n- Reports top missed commands\n\n## Example\n```\n$ rtk discover\nAnalyzed 1,234 commands across 45 sessions\n\nMissed savings by category:\n Git: 234 commands, ~16,800 tokens\n Cargo: 89 commands, ~7,120 tokens\n```\n", + "head_pipeline": {"id": 98100, "status": "success", "ref": "feat/discover"} + } +] diff --git 
a/tests/fixtures/glab_release_list_raw.txt b/tests/fixtures/glab_release_list_raw.txt new file mode 100644 index 000000000..a919148b1 --- /dev/null +++ b/tests/fixtures/glab_release_list_raw.txt @@ -0,0 +1,13 @@ +Showing 10 releases on acme/toolkit. + +Name Tag Created +v3.2.1 v3.2.1 about 2 days ago +v3.2.0 v3.2.0 about 1 week ago +v3.1.0 v3.1.0 about 3 weeks ago +v3.0.0 v3.0.0 about 1 month ago +v2.5.0 v2.5.0 about 3 months ago +v2.4.1 v2.4.1 about 5 months ago +v2.4.0 v2.4.0 about 6 months ago +v2.3.0 v2.3.0 about 9 months ago +v2.2.0 v2.2.0 about 1 year ago +v2.1.0 v2.1.0 about 2 years ago diff --git a/tests/fixtures/glab_release_view_raw.txt b/tests/fixtures/glab_release_view_raw.txt new file mode 100644 index 000000000..88893ee37 --- /dev/null +++ b/tests/fixtures/glab_release_view_raw.txt @@ -0,0 +1,30 @@ +Test Release v2.0 +alice_dev released this 3 days ago +abc1234 - v2.0.0 + + ## What's Changed + + - Added widget support + - Fixed authentication bug + + ### Contributors + + @alice_dev @bob_dev + + -------- + + Image: logo → https://example.com/logo.png + + + + +ASSETS +There are no assets for this release +SOURCES +https://gitlab.example.com/acme/toolkit/-/archive/v2.0.0/toolkit-v2.0.0.zip +https://gitlab.example.com/acme/toolkit/-/archive/v2.0.0/toolkit-v2.0.0.tar.gz +https://gitlab.example.com/acme/toolkit/-/archive/v2.0.0/toolkit-v2.0.0.tar.bz2 +https://gitlab.example.com/acme/toolkit/-/archive/v2.0.0/toolkit-v2.0.0.tar + + +View this release on GitLab at https://gitlab.example.com/acme/toolkit/-/releases/v2.0.0 From 13188a88b22f692157b89874f4c76287a0b3ecae Mon Sep 17 00:00:00 2001 From: binsee <5285894+binsee@users.noreply.github.com> Date: Tue, 21 Apr 2026 23:55:58 +0800 Subject: [PATCH 174/204] fix(git): fix empty output when branch name contains '/' in git diff MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Branch names containing '/' (e.g. 
feature/user-auth, main...feature/auth) were incorrectly treated as file paths by the string heuristic in normalize_diff_args, causing '--' to be injected before them and making git treat them as pathspecs — resulting in silent empty output (exit 0). Replace looks_like_path() with a three-tier detection strategy: - Explicit path prefixes (. ~) → always a path, no filesystem check needed - Contains path separator (/ \) → use filesystem existence to distinguish branch names (feature/auth, not on disk) from real paths (src/main.rs) - Bare word with no separator → never inject '--', regardless of filesystem state (avoids misfire when a file shares a name with a branch/ref) Introduce normalize_diff_args_impl with injectable path-checker for testability. Update all existing tests to use mock existence checks. Add three regression tests: branch-with-slash, range-with-slash, and bare-word-that-exists-on-disk. Fixes: #1431 Co-Authored-By: Claude Sonnet 4.6 --- src/cmds/git/git.rs | 133 +++++++++++++++++++++++++++++++++----------- 1 file changed, 102 insertions(+), 31 deletions(-) diff --git a/src/cmds/git/git.rs b/src/cmds/git/git.rs index 7ac95c4d3..f32c825b4 100644 --- a/src/cmds/git/git.rs +++ b/src/cmds/git/git.rs @@ -60,14 +60,6 @@ pub fn run( } } -/// Returns true if `arg` looks like a file-system path rather than a git revision. -/// -/// Used by `normalize_diff_args` to decide where to inject `--`. -fn looks_like_path(arg: &str) -> bool { - // Path separators are the strongest signal - arg.contains('/') || arg.contains('\\') || arg.starts_with('.') || arg.starts_with('~') -} - /// Re-insert `--` before the first path-like argument when clap has consumed it. 
/// /// clap's `trailing_var_arg = true` silently drops `--` when it appears as the @@ -76,17 +68,46 @@ fn looks_like_path(arg: &str) -> bool { /// `rtk git diff HEAD -- file` → args = ["HEAD", "--", "file"] (preserved) /// /// Without the `--` separator git may treat an unambiguous path as a revision and -/// emit "fatal: ambiguous argument". We re-insert `--` before the first -/// path-like argument when `--` is absent so git always gets the correct intent. +/// emit "fatal: ambiguous argument". We re-insert `--` before the first path-like +/// argument; see `normalize_diff_args_impl` for the detection rules. fn normalize_diff_args(args: &[String]) -> Vec { + normalize_diff_args_impl(args, |p| std::path::Path::new(p).exists()) +} + +/// Testable core of `normalize_diff_args` — accepts an injectable filesystem existence checker. +/// +/// The path-detection logic is: +/// 1. Explicit path prefixes (`.`, `~`) → always a path, no filesystem check needed. +/// 2. Contains path separator (`/`, `\`) → use `path_exists` to distinguish branch names +/// (e.g. `feature/auth`) from real paths (e.g. `src/main.rs`). +/// 3. Bare word with no separator → never a path (avoids injecting `--` when a file +/// happens to share a name with a branch or ref, e.g. a file named `main`). 
+fn normalize_diff_args_impl(args: &[String], path_exists: F) -> Vec +where + F: Fn(&str) -> bool, +{ // Already has `--` — nothing to do if args.iter().any(|a| a == "--") { return args.to_vec(); } - // Find the first non-flag arg that looks like a path - let path_start = args - .iter() - .position(|arg| !arg.starts_with('-') && looks_like_path(arg)); + let path_start = args.iter().position(|arg| { + if arg.starts_with('-') { + return false; + } + // Explicit path prefixes — always treat as path regardless of existence + if arg.starts_with('.') || arg.starts_with('~') { + return true; + } + // Contains path separator — use filesystem check to distinguish + // branch names (feature/auth) from real paths (src/main.rs) + if arg.contains('/') || arg.contains('\\') { + return path_exists(arg); + } + // Bare word (no separator, no special prefix) — never inject `--` + // This avoids misidentifying a ref/branch as a path even if a same-named + // file happens to exist on disk. + false + }); match path_start { Some(idx) => { let mut out = args[..idx].to_vec(); @@ -1799,7 +1820,14 @@ mod tests { ); } - // ----- normalize_diff_args (issue #1215) ----- + // ----- normalize_diff_args (issue #1215 + branch-name fix #1431) ----- + // + // Tests use normalize_diff_args_impl with a mock path-existence checker so + // they don't depend on the real filesystem. + + fn exists_mock<'a>(existing: &'a [&'a str]) -> impl Fn(&str) -> bool + 'a { + move |p| existing.contains(&p) + } /// Baseline: `--` already present → no-op, args unchanged. #[test] @@ -1809,38 +1837,44 @@ mod tests { "--".to_string(), "src/main.rs".to_string(), ]; - assert_eq!(normalize_diff_args(&args), args); + assert_eq!(normalize_diff_args_impl(&args, exists_mock(&[])), args); } - /// Core regression: clap ate `--` before a path with `/`. - /// `normalize_diff_args` must re-insert it. + /// Core regression (issue #1215): clap ate `--` before a real file path. 
+ /// When the path exists on disk, `--` must be re-inserted. #[test] - fn test_normalize_diff_args_reinserts_separator_before_path_with_slash() { + fn test_normalize_diff_args_reinserts_separator_before_existing_path() { let args = vec!["apps/client/frontend/src/MyComponent.tsx".to_string()]; - let normalized = normalize_diff_args(&args); + let normalized = normalize_diff_args_impl( + &args, + exists_mock(&["apps/client/frontend/src/MyComponent.tsx"]), + ); assert_eq!( normalized, - vec!["--".to_string(), "apps/client/frontend/src/MyComponent.tsx".to_string()], - "-- must be injected before the path argument" + vec![ + "--".to_string(), + "apps/client/frontend/src/MyComponent.tsx".to_string() + ], + "-- must be injected before an existing path" ); } - /// Ref before path: args like ["HEAD", "src/foo.rs"] get `--` inserted before the path. + /// Ref before path: ["HEAD", "src/foo.rs"] where src/foo.rs exists → inject after HEAD. #[test] fn test_normalize_diff_args_reinserts_separator_after_ref() { let args = vec!["HEAD".to_string(), "src/foo.rs".to_string()]; - let normalized = normalize_diff_args(&args); + let normalized = normalize_diff_args_impl(&args, exists_mock(&["src/foo.rs"])); assert_eq!( normalized, vec!["HEAD".to_string(), "--".to_string(), "src/foo.rs".to_string()] ); } - /// Flags before path: `["--cached", "src/foo.rs"]` → `["--cached", "--", "src/foo.rs"]`. + /// Flags before path: ["--cached", "src/foo.rs"] where src/foo.rs exists. 
#[test] fn test_normalize_diff_args_reinserts_separator_after_flag() { let args = vec!["--cached".to_string(), "src/foo.rs".to_string()]; - let normalized = normalize_diff_args(&args); + let normalized = normalize_diff_args_impl(&args, exists_mock(&["src/foo.rs"])); assert_eq!( normalized, vec!["--cached".to_string(), "--".to_string(), "src/foo.rs".to_string()] @@ -1851,25 +1885,62 @@ mod tests { #[test] fn test_normalize_diff_args_no_injection_for_pure_flags() { let args = vec!["--stat".to_string(), "--cached".to_string()]; - assert_eq!(normalize_diff_args(&args), args); + assert_eq!(normalize_diff_args_impl(&args, exists_mock(&[])), args); } - /// Dotfile / relative-path detection (starts with `.`). + /// Dotfile that exists on disk → inject `--`. #[test] fn test_normalize_diff_args_dotfile_is_path() { let args = vec![".gitignore".to_string()]; - let normalized = normalize_diff_args(&args); + let normalized = normalize_diff_args_impl(&args, exists_mock(&[".gitignore"])); assert_eq!( normalized, vec!["--".to_string(), ".gitignore".to_string()] ); } - /// A bare word that isn't path-like (e.g. a branch name) → no injection. + /// A bare ref (HEAD) that doesn't exist as a file → no injection. #[test] fn test_normalize_diff_args_no_injection_for_bare_ref() { let args = vec!["HEAD".to_string()]; - assert_eq!(normalize_diff_args(&args), args); + assert_eq!(normalize_diff_args_impl(&args, exists_mock(&[])), args); + } + + /// Branch name with `/` that does NOT exist as a file → no injection. + /// Regression for issue #1431: `rtk git diff feature/user-auth` must not inject `--`. + #[test] + fn test_normalize_diff_args_no_injection_for_branch_with_slash() { + let args = vec!["feature/user-auth".to_string()]; + assert_eq!( + normalize_diff_args_impl(&args, exists_mock(&[])), + args, + "branch names containing '/' must not trigger -- injection" + ); + } + + /// Range syntax with `/` → no injection. 
+ /// Regression: `rtk git diff main...feature/user-auth` produced no output. + #[test] + fn test_normalize_diff_args_no_injection_for_range_with_slash() { + let args = vec!["main...feature/user-auth".to_string()]; + assert_eq!( + normalize_diff_args_impl(&args, exists_mock(&[])), + args, + "revision ranges like main...feature/user-auth must not trigger -- injection" + ); + } + + /// Bare word that happens to exist as a file on disk → still no injection. + /// A file named "main" must not cause `--` to be injected when the user + /// intends `rtk git diff main` as a branch comparison. + #[test] + fn test_normalize_diff_args_no_injection_for_bare_word_even_if_file_exists() { + let args = vec!["main".to_string()]; + assert_eq!( + normalize_diff_args_impl(&args, exists_mock(&["main"])), + args, + "bare words must never trigger -- injection even when a same-named file exists" + ); } #[test] From 88d9f6a0d94fd2b5b3d40c956e966756670a2704 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 24 Apr 2026 18:54:29 +0200 Subject: [PATCH 175/204] fix(filters): benchmark ci update + fix stream filter quality --- .github/workflows/ci.yml | 5 +- scripts/benchmark.sh | 179 +++++++++++++++++++------ src/core/stream.rs | 279 ++++++++++++++++++++++++++------------- 3 files changed, 333 insertions(+), 130 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8c342d4e3..b56acffad 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -211,8 +211,11 @@ jobs: - name: Build rtk run: cargo build --release + - name: Install system tools + run: sudo apt-get install -y tree + - name: Install Python tools - run: pip install ruff pytest + run: pip install ruff pytest mypy - name: Install Go uses: actions/setup-go@v5 diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index 15aee89df..66f1d2345 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -11,34 +11,31 @@ else exit 1 fi 
BENCH_DIR="$(pwd)/scripts/benchmark" +RTK_ROOT="$(pwd)" -# Mode local : générer les fichiers debug if [ -z "$CI" ]; then rm -rf "$BENCH_DIR" mkdir -p "$BENCH_DIR/unix" "$BENCH_DIR/rtk" "$BENCH_DIR/diff" fi -# Nom de fichier safe safe_name() { echo "$1" | tr ' /' '_-' | tr -cd 'a-zA-Z0-9_-' } -# Fonction pour compter les tokens (~4 chars = 1 token) count_tokens() { local input="$1" local len=${#input} echo $(( (len + 3) / 4 )) } -# Compteurs globaux TOTAL_UNIX=0 TOTAL_RTK=0 TOTAL_TESTS=0 GOOD_TESTS=0 FAIL_TESTS=0 -SKIP_TESTS=0 +WARN_TESTS=0 +NEGATIVE_TESTS=0 -# Fonction de benchmark — une ligne par test bench() { local name="$1" local unix_cmd="$2" @@ -55,24 +52,41 @@ bench() { local icon="" local tag="" - if [ -z "$rtk_out" ]; then + if [ -z "$rtk_out" ] && [ -n "$unix_out" ]; then icon="❌" tag="FAIL" FAIL_TESTS=$((FAIL_TESTS + 1)) TOTAL_UNIX=$((TOTAL_UNIX + unix_tokens)) TOTAL_RTK=$((TOTAL_RTK + unix_tokens)) - elif [ "$rtk_tokens" -ge "$unix_tokens" ] && [ "$unix_tokens" -gt 0 ]; then + elif [ "$rtk_tokens" -gt "$unix_tokens" ] && [ "$unix_tokens" -gt 0 ]; then + icon="🔴" + tag="NEG" + NEGATIVE_TESTS=$((NEGATIVE_TESTS + 1)) + TOTAL_UNIX=$((TOTAL_UNIX + unix_tokens)) + TOTAL_RTK=$((TOTAL_RTK + rtk_tokens)) + elif [ "$unix_tokens" -gt 0 ] && [ "$rtk_tokens" -eq "$unix_tokens" ]; then icon="⚠️" - tag="SKIP" - SKIP_TESTS=$((SKIP_TESTS + 1)) + tag="WARN" + WARN_TESTS=$((WARN_TESTS + 1)) TOTAL_UNIX=$((TOTAL_UNIX + unix_tokens)) - TOTAL_RTK=$((TOTAL_RTK + unix_tokens)) - else - icon="✅" - tag="GOOD" - GOOD_TESTS=$((GOOD_TESTS + 1)) + TOTAL_RTK=$((TOTAL_RTK + rtk_tokens)) + elif [ "$unix_tokens" -gt 0 ]; then + local savings=$(( (unix_tokens - rtk_tokens) * 100 / unix_tokens )) + if [ "$savings" -lt 60 ]; then + icon="⚠️" + tag="WARN" + WARN_TESTS=$((WARN_TESTS + 1)) + else + icon="✅" + tag="GOOD" + GOOD_TESTS=$((GOOD_TESTS + 1)) + fi TOTAL_UNIX=$((TOTAL_UNIX + unix_tokens)) TOTAL_RTK=$((TOTAL_RTK + rtk_tokens)) + else + icon="⏭️" + tag="SKIP" + WARN_TESTS=$((WARN_TESTS 
+ 1)) fi if [ "$tag" = "FAIL" ]; then @@ -88,12 +102,13 @@ bench() { "$icon" "$name" "$unix_cmd" "$rtk_cmd" "$unix_tokens" "$rtk_tokens" "$pct" fi - # Fichiers debug en local uniquement if [ -z "$CI" ]; then local filename=$(safe_name "$name") local prefix="GOOD" [ "$tag" = "FAIL" ] && prefix="FAIL" - [ "$tag" = "SKIP" ] && prefix="BAD" + [ "$tag" = "NEG" ] && prefix="NEG" + [ "$tag" = "WARN" ] && prefix="WARN" + [ "$tag" = "SKIP" ] && prefix="SKIP" local ts=$(date "+%d/%m/%Y %H:%M:%S") @@ -124,7 +139,6 @@ bench() { fi } -# Section header section() { echo "" echo "── $1 ──" @@ -149,6 +163,18 @@ bench "ls src/ -l" "ls -l src/" "$RTK ls src/ -l" bench "ls -a" "ls -la" "$RTK ls -a" bench "ls multi" "ls -la src/ scripts/" "$RTK ls src/ scripts/" +# =================== +# tree +# =================== +if command -v tree &>/dev/null; then + section "tree" + bench "tree" "tree -L 2" "$RTK tree -L 2" + bench "tree src/" "tree src/ -L 2" "$RTK tree src/ -L 2" +else + echo "" + echo "⏭️ tree (not installed, skipped)" +fi + # =================== # read # =================== @@ -175,6 +201,8 @@ bench "git status" "git status" "$RTK git status" bench "git log -n 10" "git log -10" "$RTK git log -n 10" bench "git log -n 5" "git log -5" "$RTK git log -n 5" bench "git diff" "git diff HEAD~1 2>/dev/null || echo ''" "$RTK git diff HEAD~1" +bench "git show" "git show HEAD --stat 2>/dev/null || true" "$RTK git show HEAD --stat" +bench "git branch" "git branch -a" "$RTK git branch -a" # =================== # grep @@ -327,7 +355,15 @@ fi # =================== if command -v wget &> /dev/null; then section "wget" - bench "wget" "wget -qO- https://httpbin.org/robots.txt" "$RTK wget https://httpbin.org/robots.txt -O" + bench "wget" "wget -qO- https://httpbin.org/robots.txt" "$RTK wget https://httpbin.org/robots.txt -O -" +fi + +# =================== +# npm (standalone — does not require package.json) +# =================== +if command -v npm &> /dev/null; then + section "npm" + bench "npm 
list" "npm list -g --depth 0 2>&1 || true" "$RTK npm list -g --depth 0" fi # =================== @@ -386,7 +422,24 @@ if command -v gh &> /dev/null && git rev-parse --git-dir &> /dev/null; then fi # =================== -# docker (skip si pas dispo) +# glab +# =================== +if command -v glab &> /dev/null; then + section "glab" + bench "glab mr list" "glab mr list 2>&1 || true" "$RTK glab mr list" + bench "glab issue list" "glab issue list 2>&1 || true" "$RTK glab issue list" +fi + +# =================== +# gt (Graphite) +# =================== +if command -v gt &> /dev/null; then + section "gt" + bench "gt log" "gt log 2>&1 || true" "$RTK gt log" +fi + +# =================== +# docker # =================== if command -v docker &> /dev/null; then section "docker" @@ -395,7 +448,7 @@ if command -v docker &> /dev/null; then fi # =================== -# kubectl (skip si pas dispo) +# kubectl # =================== if command -v kubectl &> /dev/null; then section "kubectl" @@ -412,7 +465,6 @@ if command -v python3 &> /dev/null && command -v ruff &> /dev/null && command -v PYTHON_FIXTURE=$(mktemp -d) cd "$PYTHON_FIXTURE" - # pyproject.toml cat > pyproject.toml << 'PYEOF' [project] name = "rtk-bench" @@ -422,7 +474,6 @@ version = "0.1.0" line-length = 88 PYEOF - # sample.py avec quelques issues ruff cat > sample.py << 'PYEOF' import os import sys @@ -442,7 +493,6 @@ def unused_function(): # F841: local variable assigned but never used return None PYEOF - # test_sample.py cat > test_sample.py << 'PYEOF' from sample import process_data @@ -456,7 +506,15 @@ PYEOF bench "ruff check" "ruff check . 2>&1 || true" "$RTK ruff check ." 
bench "pytest" "pytest -v 2>&1 || true" "$RTK pytest -v" - cd - > /dev/null + if command -v pip &>/dev/null; then + bench "pip list" "pip list 2>&1 || true" "$RTK pip list" + fi + + if command -v mypy &>/dev/null; then + bench "mypy" "mypy sample.py 2>&1 || true" "$RTK mypy sample.py" + fi + + cd "$RTK_ROOT" rm -rf "$PYTHON_FIXTURE" fi @@ -469,14 +527,12 @@ if command -v go &> /dev/null && command -v golangci-lint &> /dev/null; then GO_FIXTURE=$(mktemp -d) cd "$GO_FIXTURE" - # go.mod cat > go.mod << 'GOEOF' module bench go 1.21 GOEOF - # main.go cat > main.go << 'GOEOF' package main @@ -496,7 +552,6 @@ func main() { } GOEOF - # main_test.go cat > main_test.go << 'GOEOF' package main @@ -522,16 +577,55 @@ GOEOF bench "go build" "go build ./... 2>&1 || true" "$RTK go build ./..." bench "go vet" "go vet ./... 2>&1 || true" "$RTK go vet ./..." - cd - > /dev/null + cd "$RTK_ROOT" rm -rf "$GO_FIXTURE" fi +# =================== +# Ruby +# =================== +if command -v ruby &> /dev/null; then + section "ruby" + if command -v rake &>/dev/null; then + bench "rake -T" "rake -T 2>&1 || true" "$RTK rake -T" + fi + if command -v rubocop &>/dev/null; then + bench "rubocop" "rubocop --format simple 2>&1 || true" "$RTK rubocop --format simple" + fi + if command -v rspec &>/dev/null; then + bench "rspec --dry-run" "rspec --dry-run 2>&1 || true" "$RTK rspec --dry-run" + fi +fi + +# =================== +# dotnet +# =================== +if command -v dotnet &> /dev/null; then + section "dotnet" + bench "dotnet --info" "dotnet --info 2>&1 || true" "$RTK dotnet --info" +fi + +# =================== +# aws +# =================== +if command -v aws &> /dev/null; then + section "aws" + bench "aws --version" "aws --version 2>&1 || true" "$RTK aws --version" +fi + +# =================== +# psql +# =================== +if command -v psql &> /dev/null; then + section "psql" + bench "psql --version" "psql --version 2>&1 || true" "$RTK psql --version" +fi + # =================== # rewrite 
(verify rewrite works with and without quotes) # =================== section "rewrite" -# bench_rewrite: verifies rewrite produces expected output (not token comparison) bench_rewrite() { local name="$1" local cmd="$2" @@ -558,7 +652,7 @@ bench_rewrite "rewrite cargo test" "$RTK rewrite cargo test" "rtk cargo bench_rewrite "rewrite compound" "$RTK rewrite 'cargo test && git push'" "rtk cargo test && rtk git push" # =================== -# Résumé global +# Summary # =================== echo "" echo "═══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════" @@ -574,19 +668,30 @@ if [ "$TOTAL_TESTS" -gt 0 ]; then fi echo "" - echo " ✅ $GOOD_TESTS good ⚠️ $SKIP_TESTS skip ❌ $FAIL_TESTS fail $GOOD_TESTS/$TOTAL_TESTS ($GOOD_PCT%)" + echo " ✅ $GOOD_TESTS good ⚠️ $WARN_TESTS warn 🔴 $NEGATIVE_TESTS negative ❌ $FAIL_TESTS fail $GOOD_TESTS/$TOTAL_TESTS ($GOOD_PCT%)" echo " Tokens: $TOTAL_UNIX → $TOTAL_RTK (-$TOTAL_SAVE_PCT%)" echo "" - # Fichiers debug en local if [ -z "$CI" ]; then echo " Debug: $BENCH_DIR/{unix,rtk,diff}/" fi echo "" - # Exit code non-zero si moins de 80% good - if [ "$GOOD_PCT" -lt 80 ]; then - echo " BENCHMARK FAILED: $GOOD_PCT% good (minimum 80%)" - exit 1 + EXIT_CODE=0 + + if [ "$NEGATIVE_TESTS" -gt 0 ]; then + echo " BENCHMARK FAILED: $NEGATIVE_TESTS filter(s) produced more tokens than raw output" + EXIT_CODE=1 + fi + + if [ "$FAIL_TESTS" -gt 0 ]; then + echo " BENCHMARK FAILED: $FAIL_TESTS filter(s) returned empty output" + EXIT_CODE=1 fi + + if [ "$GOOD_PCT" -lt 60 ] && [ "$EXIT_CODE" -eq 0 ]; then + echo " WARNING: $GOOD_PCT% good (target 60%)" + fi + + exit $EXIT_CODE fi diff --git a/src/core/stream.rs b/src/core/stream.rs index 7f7f29704..02bb0ffef 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -2,6 +2,7 @@ use anyhow::{Context, Result}; use regex::Regex; use std::io::{self, BufRead, BufReader, BufWriter, Write}; use std::process::{Command, Stdio}; +use 
std::sync::mpsc; pub trait StreamFilter { fn feed_line(&mut self, line: &str) -> Option; @@ -263,7 +264,7 @@ pub fn run_streaming( } } - let live_stderr = matches!(stdout_mode, FilterMode::Streaming(_)); + let is_streaming = matches!(stdout_mode, FilterMode::Streaming(_)); let mut child = ChildGuard(cmd.spawn().context("Failed to spawn process")?); @@ -296,121 +297,157 @@ pub fn run_streaming( StdinMode::Inherit => None, }; + let stdout = child.0.stdout.take().context("No child stdout handle")?; let stderr = child.0.stderr.take().context("No child stderr handle")?; - let stderr_thread = std::thread::spawn(move || -> String { - let mut raw_err = String::new(); - let mut capped = false; - if live_stderr { - let stderr_out = io::stderr(); - let mut err_out = stderr_out.lock(); - for line in BufReader::new(stderr).lines().map_while(Result::ok) { - writeln!(err_out, "{}", line).ok(); - if raw_err.len() + line.len() + 1 <= RAW_CAP { - raw_err.push_str(&line); - raw_err.push('\n'); - } else if !capped { - capped = true; - eprintln!("[rtk] warning: stderr exceeds 10 MiB — capture truncated"); + let mut raw_stdout = String::new(); + let mut raw_stderr = String::new(); + let mut filtered = String::new(); + let mut capped_out = false; + let mut capped_err = false; + let mut saved_filter: Option> = None; + + if is_streaming { + enum StreamLine { + Stdout(String), + Stderr(String), + } + + let (tx, rx) = mpsc::channel(); + let tx_out = tx.clone(); + let stdout_thread = std::thread::spawn(move || { + for line in BufReader::new(stdout).lines().map_while(Result::ok) { + if tx_out.send(StreamLine::Stdout(line)).is_err() { + break; } } - } else { + }); + let tx_err = tx; + let stderr_thread = std::thread::spawn(move || { for line in BufReader::new(stderr).lines().map_while(Result::ok) { - if raw_err.len() + line.len() + 1 <= RAW_CAP { - raw_err.push_str(&line); - raw_err.push('\n'); - } else if !capped { - capped = true; + if tx_err.send(StreamLine::Stderr(line)).is_err() { + break; 
} } - } - raw_err - }); - - let stdout = child.0.stdout.take().context("No child stdout handle")?; - let mut raw_stdout = String::new(); - let mut filtered = String::new(); - let mut capped = false; - let mut saved_filter: Option> = None; + }); - { - let stdout_handle = io::stdout(); - let mut out = stdout_handle.lock(); - - match stdout_mode { - FilterMode::Passthrough => unreachable!("handled by early-return above"), - FilterMode::Streaming(mut filter) => { - for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if capped { - continue; + if let FilterMode::Streaming(mut filter) = stdout_mode { + let stdout_handle = io::stdout(); + let mut out = stdout_handle.lock(); + + for msg in rx { + let (line, is_stderr) = match msg { + StreamLine::Stdout(l) => (l, false), + StreamLine::Stderr(l) => (l, true), + }; + if is_stderr { + if !capped_err { + if raw_stderr.len() + line.len() + 1 <= RAW_CAP { + raw_stderr.push_str(&line); + raw_stderr.push('\n'); + } else { + capped_err = true; + eprintln!("[rtk] warning: stderr exceeds 10 MiB — capture truncated"); + } } + } else if !capped_out { if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } else { - capped = true; - eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); - continue; + capped_out = true; + eprintln!("[rtk] warning: stdout exceeds 10 MiB — filter input truncated"); } - if let Some(output) = filter.feed_line(&line) { - filtered.push_str(&output); - match write!(out, "{}", output) { - Err(e) if e.kind() == io::ErrorKind::BrokenPipe => break, - Err(e) => return Err(e.into()), - Ok(_) => {} - } + } + if let Some(output) = filter.feed_line(&line) { + filtered.push_str(&output); + match write!(out, "{}", output) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => break, + Err(e) => return Err(e.into()), + Ok(_) => {} } } - let tail = filter.flush(); - filtered.push_str(&tail); - match write!(out, "{}", tail) { - Err(e) if 
e.kind() == io::ErrorKind::BrokenPipe => {} - Err(e) => return Err(e.into()), - Ok(_) => {} + } + let tail = filter.flush(); + filtered.push_str(&tail); + match write!(io::stdout(), "{}", tail) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} + Err(e) => return Err(e.into()), + Ok(_) => {} + } + saved_filter = Some(filter); + } + + stdout_thread.join().ok(); + stderr_thread.join().ok(); + } else { + let stderr_thread = std::thread::spawn(move || -> String { + let mut raw_err = String::new(); + let mut capped = false; + for line in BufReader::new(stderr).lines().map_while(Result::ok) { + if raw_err.len() + line.len() + 1 <= RAW_CAP { + raw_err.push_str(&line); + raw_err.push('\n'); + } else if !capped { + capped = true; } - saved_filter = Some(filter); } - FilterMode::Buffered(filter_fn) => { - for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { - raw_stdout.push_str(&line); - raw_stdout.push('\n'); - } else if !capped { - capped = true; - eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); + raw_err + }); + + { + let stdout_handle = io::stdout(); + let mut out = stdout_handle.lock(); + + match stdout_mode { + FilterMode::Passthrough => unreachable!("handled by early-return above"), + FilterMode::Streaming(_) => unreachable!("handled by is_streaming branch"), + FilterMode::Buffered(filter_fn) => { + for line in BufReader::new(stdout).lines().map_while(Result::ok) { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + raw_stdout.push_str(&line); + raw_stdout.push('\n'); + } else if !capped_out { + capped_out = true; + eprintln!( + "[rtk] warning: output exceeds 10 MiB — filter input truncated" + ); + } + } + filtered = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + filter_fn(&raw_stdout) + })) + .unwrap_or_else(|_| { + eprintln!("[rtk] warning: filter panicked — passing through raw output"); + raw_stdout.clone() + }); + match write!(out, "{}", filtered) 
{ + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} + Err(e) => return Err(e.into()), + Ok(_) => {} } } - filtered = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - filter_fn(&raw_stdout) - })) - .unwrap_or_else(|_| { - eprintln!("[rtk] warning: filter panicked — passing through raw output"); - raw_stdout.clone() - }); - match write!(out, "{}", filtered) { - Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} - Err(e) => return Err(e.into()), - Ok(_) => {} - } - } - FilterMode::CaptureOnly => { - for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { - raw_stdout.push_str(&line); - raw_stdout.push('\n'); - } else if !capped { - capped = true; - eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); + FilterMode::CaptureOnly => { + for line in BufReader::new(stdout).lines().map_while(Result::ok) { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + raw_stdout.push_str(&line); + raw_stdout.push('\n'); + } else if !capped_out { + capped_out = true; + eprintln!( + "[rtk] warning: output exceeds 10 MiB — filter input truncated" + ); + } } + filtered = raw_stdout.clone(); } - filtered = raw_stdout.clone(); } } - } - let raw_stderr = stderr_thread.join().unwrap_or_else(|e| { - eprintln!("[rtk] warning: stderr reader thread panicked: {:?}", e); - String::new() - }); + raw_stderr = stderr_thread.join().unwrap_or_else(|e| { + eprintln!("[rtk] warning: stderr reader thread panicked: {:?}", e); + String::new() + }); + } if let Some(t) = stdin_thread { t.join().ok(); } @@ -864,4 +901,62 @@ pub(crate) mod tests { result ); } + + #[cfg(not(windows))] + #[test] + fn test_streaming_merges_stderr_through_filter() { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "echo 'error[E0308]: type mismatch'; echo ' Compiling foo v1.0' >&2; echo ' Downloading bar v2.0' >&2; echo ' Finished dev' >&2; echo 'real error on stderr' >&2"]); + + struct CargoLikeHandler; + impl 
BlockHandler for CargoLikeHandler { + fn should_skip(&mut self, line: &str) -> bool { + let trimmed = line.trim_start(); + trimmed.starts_with("Compiling") + || trimmed.starts_with("Downloading") + || trimmed.starts_with("Finished") + } + fn is_block_start(&mut self, line: &str) -> bool { + line.starts_with("error") + } + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(' ') + } + fn format_summary(&self, _: i32, _: &str) -> Option { + None + } + } + + let filter = BlockStreamFilter::new(CargoLikeHandler); + let result = run_streaming( + &mut cmd, + StdinMode::Null, + FilterMode::Streaming(Box::new(filter)), + ) + .unwrap(); + + assert!( + !result.filtered.contains("Compiling"), + "filtered output should not contain cargo noise, got: {}", + result.filtered + ); + assert!( + !result.filtered.contains("Downloading"), + "filtered output should not contain cargo noise, got: {}", + result.filtered + ); + assert!( + result.filtered.contains("error[E0308]"), + "filtered output should contain real errors, got: {}", + result.filtered + ); + assert!( + result.raw_stderr.contains("Compiling"), + "raw_stderr should still capture noise for tracking" + ); + assert!( + result.raw_stderr.contains("real error on stderr"), + "raw_stderr should capture real errors" + ); + } } From 7681dafc76f164cfad588fe37d9a165dcb476e10 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 24 Apr 2026 19:04:51 +0200 Subject: [PATCH 176/204] fix(cicd):: no semgrep alert on sh call cicd --- src/core/stream.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/stream.rs b/src/core/stream.rs index 02bb0ffef..7970dfbb3 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -905,6 +905,7 @@ pub(crate) mod tests { #[cfg(not(windows))] #[test] fn test_streaming_merges_stderr_through_filter() { + // nosemgrep: interpreter-execution let mut cmd = Command::new("sh"); cmd.args(["-c", "echo 'error[E0308]: type 
mismatch'; echo ' Compiling foo v1.0' >&2; echo ' Downloading bar v2.0' >&2; echo ' Finished dev' >&2; echo 'real error on stderr' >&2"]); From 70b36b4dbc3e147219ad87cf539d073523b86a85 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 24 Apr 2026 19:37:18 +0200 Subject: [PATCH 177/204] fix(tracking): test env path --- src/core/tracking.rs | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/core/tracking.rs b/src/core/tracking.rs index 596602673..7bb3e575f 100644 --- a/src/core/tracking.rs +++ b/src/core/tracking.rs @@ -1544,29 +1544,27 @@ mod tests { } // 7. get_db_path respects environment variable RTK_DB_PATH + // 8. get_db_path falls back to default when no custom config + // Combined into one test to avoid env var race between parallel tests #[test] - fn test_custom_db_path_env() { + fn test_db_path_env_and_default() { use std::env; + use std::sync::Mutex; + static ENV_LOCK: Mutex<()> = Mutex::new(()); + let _guard = ENV_LOCK.lock().unwrap(); let custom_path = env::temp_dir().join("rtk_test_custom.db"); env::set_var("RTK_DB_PATH", &custom_path); - let db_path = get_db_path().expect("Failed to get db path"); assert_eq!(db_path, custom_path); env::remove_var("RTK_DB_PATH"); - } - - // 8. get_db_path falls back to default when no custom config - #[test] - fn test_default_db_path() { - use std::env; - - // Ensure no env var is set - env::remove_var("RTK_DB_PATH"); - let db_path = get_db_path().expect("Failed to get db path"); - assert!(db_path.ends_with("rtk/history.db")); + assert!( + db_path.ends_with("rtk/history.db"), + "expected default path ending with rtk/history.db, got: {}", + db_path.display() + ); } // 9. 
project_filter_params uses GLOB pattern with * wildcard // added From 5e84e9471736fe58e89094854f4123ecb07c2d3b Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 24 Apr 2026 20:41:13 +0200 Subject: [PATCH 178/204] fix(npm): regex match end line --- src/discover/rules.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discover/rules.rs b/src/discover/rules.rs index 059bc48ca..74c876a1e 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -62,7 +62,7 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^npm\s+(exec|run|run-script|rum|urn|x)\s+", + pattern: r"^npm\s+(exec|run|run-script|rum|urn|x)(\s|$)", rtk_cmd: "rtk npm", rewrite_prefixes: &["npm"], category: "PackageManager", From 7e3690a23ab158ca8e1e890650554e20e3a0c17b Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 25 Apr 2026 09:31:46 +0200 Subject: [PATCH 179/204] fix: remove wrong cicd benchmark + npm test regex --- scripts/benchmark.sh | 9 ++++----- src/discover/registry.rs | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index 66f1d2345..15eaaf091 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -202,7 +202,6 @@ bench "git log -n 10" "git log -10" "$RTK git log -n 10" bench "git log -n 5" "git log -5" "$RTK git log -n 5" bench "git diff" "git diff HEAD~1 2>/dev/null || echo ''" "$RTK git diff HEAD~1" bench "git show" "git show HEAD --stat 2>/dev/null || true" "$RTK git show HEAD --stat" -bench "git branch" "git branch -a" "$RTK git branch -a" # =================== # grep @@ -211,7 +210,6 @@ section "grep" bench "grep fn" "grep -rn 'fn ' src/ || true" "$RTK grep 'fn ' src/" bench "grep struct" "grep -rn 'struct ' src/ || true" "$RTK grep 'struct ' src/" bench "grep -l 40" "grep -rn 'fn ' src/ || true" "$RTK grep 'fn ' src/ -l 40" -bench "grep --max 20" 
"grep -rn 'fn ' src/ | head -20 || true" "$RTK grep 'fn ' src/ --max 20" bench "grep -c" "grep -ron 'fn ' src/ || true" "$RTK grep 'fn ' src/ -c" # =================== @@ -327,7 +325,7 @@ fi # diff # =================== section "diff" -bench "diff" "diff Cargo.toml LICENSE 2>&1 || true" "$RTK diff Cargo.toml LICENSE" +bench "diff" "diff src/main.rs src/core/tracking.rs 2>&1 || true" "$RTK diff src/main.rs src/core/tracking.rs" # =================== # smart @@ -355,7 +353,8 @@ fi # =================== if command -v wget &> /dev/null; then section "wget" - bench "wget" "wget -qO- https://httpbin.org/robots.txt" "$RTK wget https://httpbin.org/robots.txt -O -" + bench "wget" "wget -qO- https://httpbin.org/json" "$RTK wget https://httpbin.org/json" + rm -f json 2>/dev/null fi # =================== @@ -415,7 +414,7 @@ fi # =================== # gh (skip si pas dispo ou pas dans un repo) # =================== -if command -v gh &> /dev/null && git rev-parse --git-dir &> /dev/null; then +if command -v gh &> /dev/null && git rev-parse --git-dir &> /dev/null && gh auth status &> /dev/null; then section "gh" bench "gh pr list" "gh pr list 2>&1 || true" "$RTK gh pr list" bench "gh run list" "gh run list 2>&1 || true" "$RTK gh run list" diff --git a/src/discover/registry.rs b/src/discover/registry.rs index dcbb4f0de..e40ff08b1 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -2790,6 +2790,31 @@ mod tests { } } + #[test] + fn test_rewrite_npm_bare_subcommand() { + let commands = vec!["exec", "run", "run-script", "x"]; + for command in commands { + assert_eq!( + rewrite_command(format!("npm {command}").as_str(), &[]), + Some(format!("rtk npm {command}")), + "Failed for bare command: npm {}", + command + ); + } + } + + #[test] + fn test_rewrite_npm_with_args() { + assert_eq!( + rewrite_command("npm run test", &[]), + Some("rtk npm run test".to_string()), + ); + assert_eq!( + rewrite_command("npm exec vitest", &[]), + Some("rtk vitest".to_string()), + ); + } + 
#[test] fn test_rewrite_npx() { assert_eq!( From e7ae6bf018882dba248f151ba4ec4929300b3e36 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 25 Apr 2026 10:32:22 +0200 Subject: [PATCH 180/204] fix(benchmark): extract format_diff_changes + remove wrong diff test --- scripts/benchmark.sh | 6 ------ src/cmds/git/diff_cmd.rs | 43 ++++++++++++++++++++++++++++++---------- 2 files changed, 32 insertions(+), 17 deletions(-) diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index 15eaaf091..af84b1133 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -321,12 +321,6 @@ else echo "⏭️ cargo build/test/clippy/check (cargo not in PATH, skipped)" fi -# =================== -# diff -# =================== -section "diff" -bench "diff" "diff src/main.rs src/core/tracking.rs 2>&1 || true" "$RTK diff src/main.rs src/core/tracking.rs" - # =================== # smart # =================== diff --git a/src/cmds/git/diff_cmd.rs b/src/cmds/git/diff_cmd.rs index 96a148bc3..59c95b100 100644 --- a/src/cmds/git/diff_cmd.rs +++ b/src/cmds/git/diff_cmd.rs @@ -40,17 +40,7 @@ pub fn run(file1: &Path, file2: &Path, verbose: u8) -> Result<()> { diff.added, diff.removed, diff.modified )); - // Never truncate diff content — users make decisions based on this data. - // Only the summary header provides compression; all changes are shown in full. 
- for change in &diff.changes { - match change { - DiffChange::Added(ln, c) => rtk.push_str(&format!("+{:4} {}\n", ln, c)), - DiffChange::Removed(ln, c) => rtk.push_str(&format!("-{:4} {}\n", ln, c)), - DiffChange::Modified(ln, old, new) => { - rtk.push_str(&format!("~{:4} {} → {}\n", ln, old, new)) - } - } - } + rtk.push_str(&format_diff_changes(&diff)); print!("{}", rtk); timer.track( @@ -93,6 +83,20 @@ struct DiffResult { changes: Vec, } +fn format_diff_changes(diff: &DiffResult) -> String { + let mut out = String::new(); + for change in &diff.changes { + match change { + DiffChange::Added(ln, c) => out.push_str(&format!("+{:4} {}\n", ln, c)), + DiffChange::Removed(ln, c) => out.push_str(&format!("-{:4} {}\n", ln, c)), + DiffChange::Modified(ln, old, new) => { + out.push_str(&format!("~{:4} {} → {}\n", ln, old, new)) + } + } + } + out +} + fn compute_diff(lines1: &[&str], lines2: &[&str]) -> DiffResult { let mut changes = Vec::new(); let mut added = 0; @@ -417,6 +421,23 @@ diff --git a/b.rs b/b.rs assert!(!result.changes.is_empty()); } + #[test] + fn test_format_diff_shows_all_changes() { + let mut a = Vec::new(); + let mut b = Vec::new(); + for i in 0..100 { + a.push(format!("old_line_{}", i)); + b.push(format!("new_line_{}", i)); + } + let a_refs: Vec<&str> = a.iter().map(|s| s.as_str()).collect(); + let b_refs: Vec<&str> = b.iter().map(|s| s.as_str()).collect(); + let diff = compute_diff(&a_refs, &b_refs); + let output = format_diff_changes(&diff); + + assert!(output.contains("old_line_0"), "should contain first change"); + assert!(output.contains("new_line_99"), "should contain last change"); + } + #[test] fn test_long_lines_not_truncated() { let long_line = "x".repeat(500); From 2c4569caa64d013ad4ada0b7580f9f16d8334c19 Mon Sep 17 00:00:00 2001 From: Trevin Chow Date: Wed, 22 Apr 2026 11:31:18 -0700 Subject: [PATCH 181/204] fix(npx): dispatch unknown tools to npx instead of npm The generic fallback arm of `Commands::Npx` called `npm_cmd::run`, which executes 
`npm` instead of `npx`. This broke every non-routed npx invocation. `rtk npx cowsay hello` failed with `npm error Missing script: "cowsay"` because the rewrite hook sends `npx ...` through as `rtk npx ...`, and `rtk npx` was then running npm. Mirror the existing prisma generic-subcommand passthrough pattern: resolve npx, forward all args, record a tracked passthrough, and propagate the exit status. Fixes #815 --- src/main.rs | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 82d994910..b255317f8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2058,8 +2058,22 @@ fn run_cli() -> Result { "prettier" => prettier_cmd::run(&args[1..], cli.verbose)?, "playwright" => playwright_cmd::run(&args[1..], cli.verbose)?, _ => { - // Generic passthrough with npm boilerplate filter - npm_cmd::run(&args, cli.verbose, cli.skip_env)? + // Generic npx passthrough: unknown tools run through npx, not npm + let timer = core::tracking::TimedExecution::start(); + let mut cmd = core::utils::resolved_command("npx"); + for arg in &args { + cmd.arg(arg); + } + if cli.skip_env { + cmd.env("SKIP_ENV_VALIDATION", "1"); + } + let status = cmd.status().context("Failed to run npx")?; + let args_str = args.join(" "); + timer.track_passthrough( + &format!("npx {}", args_str), + &format!("rtk npx {} (passthrough)", args_str), + ); + core::utils::exit_code_from_status(&status, "npx") } } } @@ -2988,4 +3002,19 @@ mod tests { "--ultra-compact long form must still enable ultra-compact mode" ); } + + #[test] + fn test_npx_unknown_tool_passthrough() { + // The bug (rtk-ai/rtk#815) was that unknown tools under `rtk npx` + // were dispatched to `npm` instead of `npx`. At the parse level, the + // Npx variant must carry all args through unchanged so the dispatch + // arm can forward them to npx. 
+ let cli = Cli::try_parse_from(["rtk", "npx", "cowsay", "hello"]).unwrap(); + match cli.command { + Commands::Npx { args } => { + assert_eq!(args, vec!["cowsay", "hello"]); + } + _ => panic!("Expected Commands::Npx for unknown tool"), + } + } } From 4d5c2fc8a8c9fbe1bdc6d493b5c31f0be2938572 Mon Sep 17 00:00:00 2001 From: Trevin Chow Date: Thu, 23 Apr 2026 01:45:11 -0700 Subject: [PATCH 182/204] refactor(npx): use shared runner::run_passthrough_cmd helper Per @KuSh's review on #1458, the generic npx passthrough arm built its own Command + tracking::TimedExecution inline, duplicating the logic already covered by core::runner::run_passthrough. The sticking point was `SKIP_ENV_VALIDATION`: the existing helper takes tool + args and builds the Command itself, so there was no seam for a caller-supplied env var. Add a `run_passthrough_cmd` variant that accepts a pre-built Command and refactor the existing `run_passthrough` to delegate to it. The npx arm now builds a Command, sets SKIP_ENV_VALIDATION when requested, and hands it to the shared helper. This also upgrades the npx passthrough from a raw `cmd.status()` to the shared streaming + tracking path that pnpm_cmd already uses, so verbose logging and telemetry stay consistent across tools. 
--- src/core/runner.rs | 22 +++++++++++++++++----- src/main.rs | 12 ++++-------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/core/runner.rs b/src/core/runner.rs index f127a6081..73866e336 100644 --- a/src/core/runner.rs +++ b/src/core/runner.rs @@ -169,16 +169,28 @@ where } pub fn run_passthrough(tool: &str, args: &[std::ffi::OsString], verbose: u8) -> Result { - if verbose > 0 { - eprintln!("{} passthrough: {:?}", tool, args); - } let mut cmd = crate::core::utils::resolved_command(tool); cmd.args(args); let args_str = tracking::args_display(args); + run_passthrough_cmd(cmd, tool, &args_str, verbose) +} + +/// Passthrough variant for callers that need to build the Command themselves +/// (e.g., to set env vars like `SKIP_ENV_VALIDATION`). Handles the verbose log +/// line and delegates to `run` with `RunMode::Passthrough`. +pub fn run_passthrough_cmd( + cmd: Command, + tool_name: &str, + args_display: &str, + verbose: u8, +) -> Result { + if verbose > 0 { + eprintln!("{} passthrough: {}", tool_name, args_display); + } run( cmd, - tool, - &args_str, + tool_name, + args_display, RunMode::Passthrough, RunOptions::default(), ) diff --git a/src/main.rs b/src/main.rs index b255317f8..551e255e0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2058,8 +2058,9 @@ fn run_cli() -> Result { "prettier" => prettier_cmd::run(&args[1..], cli.verbose)?, "playwright" => playwright_cmd::run(&args[1..], cli.verbose)?, _ => { - // Generic npx passthrough: unknown tools run through npx, not npm - let timer = core::tracking::TimedExecution::start(); + // Generic npx passthrough: unknown tools run through npx, not npm. + // Build the Command here so we can set SKIP_ENV_VALIDATION, then hand + // it to the shared runner so tracking + streaming match pnpm_cmd. 
let mut cmd = core::utils::resolved_command("npx"); for arg in &args { cmd.arg(arg); @@ -2067,13 +2068,8 @@ fn run_cli() -> Result { if cli.skip_env { cmd.env("SKIP_ENV_VALIDATION", "1"); } - let status = cmd.status().context("Failed to run npx")?; let args_str = args.join(" "); - timer.track_passthrough( - &format!("npx {}", args_str), - &format!("rtk npx {} (passthrough)", args_str), - ); - core::utils::exit_code_from_status(&status, "npx") + core::runner::run_passthrough_cmd(cmd, "npx", &args_str, cli.verbose)? } } } From 614e5629de3462903cc1965b4a8710428c6b08eb Mon Sep 17 00:00:00 2001 From: Trevin Chow Date: Sat, 25 Apr 2026 15:14:01 -0700 Subject: [PATCH 183/204] refactor(npx): consolidate npm/npx execution in npm_cmd Add `npm_cmd::exec` for npx and an internal `run_filtered` helper that both `run` (npm) and `exec` (npx) share. The helper resolves the command, applies args, honors SKIP_ENV_VALIDATION, and routes through runner::run_filtered with the npm output filter. The npx fallback in main.rs collapses to `npm_cmd::exec(&args, cli.verbose, cli.skip_env)?`. Per @KuSh review on #1458. --- src/cmds/js/npm_cmd.rs | 46 +++++++++++++++++++++++++++--------------- src/main.rs | 15 +------------- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/src/cmds/js/npm_cmd.rs b/src/cmds/js/npm_cmd.rs index cd8723ede..6066f104f 100644 --- a/src/cmds/js/npm_cmd.rs +++ b/src/cmds/js/npm_cmd.rs @@ -74,8 +74,6 @@ const NPM_SUBCOMMANDS: &[&str] = &[ ]; pub fn run(args: &[String], verbose: u8, skip_env: bool) -> Result { - let mut cmd = resolved_command("npm"); - // Determine if this is "npm run