Commit a42d956
feat(cli): restructure langcodec config
1 parent ff34f8a commit a42d956

7 files changed

Lines changed: 1114 additions & 319 deletions

README.md

Lines changed: 11 additions & 7 deletions
@@ -127,16 +127,20 @@ langcodec sync --source source.xcstrings --target target.xcstrings --match-lang
 Create a `langcodec.toml` in your project:
 
 ```toml
-[ai]
-provider = "openai"
-model = "gpt-4.1-mini"
+[openai]
+model = "gpt-5.4"
 
 [translate]
+concurrency = 4
+
+[translate.input]
 source = "locales/Localizable.xcstrings"
-source_lang = "en"
-target_lang = "fr,de"
+lang = "en"
 status = ["new", "stale"]
-concurrency = 4
+
+[translate.output]
+lang = ["fr", "de"]
+status = "translated"
 
 [annotate]
 input = "locales/Localizable.xcstrings"
@@ -151,7 +155,7 @@ langcodec translate
 langcodec annotate
 ```
 
-`translate` still accepts legacy `translate.provider` and `translate.model` if you have older config files. For larger projects, `translate.sources = [...]` can fan out parallel runs from config.
+When exactly one provider section is configured, `translate` and `annotate` use it automatically. If you configure multiple providers, choose one with `--provider` or `translate.provider`. For larger projects, `translate.input.sources = [...]` can fan out parallel runs from config.
 
 `annotate` also supports `annotate.inputs = [...]` for config-driven in-place runs across multiple xcstrings files.

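The fan-out hint above is easier to see in a full config. A minimal sketch under the new layout, assuming `translate.input.sources` takes the place of the single `source` key; the catalog paths are hypothetical:

```toml
[openai]
model = "gpt-5.4"

[translate]
concurrency = 4

[translate.input]
# Assumption: one parallel run per listed catalog (paths are hypothetical).
sources = ["locales/App.xcstrings", "locales/Settings.xcstrings"]
lang = "en"
status = ["new", "stale"]

[translate.output]
lang = ["fr", "de"]
status = "translated"
```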
langcodec-cli/README.md

Lines changed: 13 additions & 7 deletions
@@ -99,6 +99,7 @@ langcodec translate \
 - config defaults from `langcodec.toml`
 - multiple target languages for multi-language outputs
 - live progress updates
+- `--ui auto|plain|tui` for dashboard or plain terminal output
 - preflight validation before model requests
 - translation result summaries at the end
 
@@ -120,21 +121,26 @@ langcodec annotate \
 - preserving manual comments
 - config defaults from `langcodec.toml`
 - source shortlisting before agent lookup
+- `--ui auto|plain|tui` for dashboard or plain terminal output
 - `--dry-run` and `--check` for CI-friendly runs
 
 ## Example Config
 
 ```toml
-[ai]
-provider = "openai"
-model = "gpt-4.1-mini"
+[openai]
+model = "gpt-5.4"
 
 [translate]
+concurrency = 4
+
+[translate.input]
 source = "locales/Localizable.xcstrings"
-source_lang = "en"
-target_lang = "fr,de"
+lang = "en"
 status = ["new", "stale"]
-concurrency = 4
+
+[translate.output]
+lang = ["fr", "de"]
+status = "translated"
 
 [annotate]
 input = "locales/Localizable.xcstrings"
@@ -149,7 +155,7 @@ langcodec translate
 langcodec annotate
 ```
 
-Legacy configs using `translate.provider` and `translate.model` still work. For larger repos, `translate.sources = [...]` can fan out parallel runs from config.
+When exactly one provider section is configured, `translate` and `annotate` use it automatically. If you configure multiple providers, choose one with `--provider` or `translate.provider`. For larger repos, `translate.input.sources = [...]` can fan out parallel runs from config.
 
 For annotate fan-out runs, use `annotate.inputs = [...]` and omit `annotate.output` so each catalog is updated in place.

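Combining the two notes above, a hedged sketch of a multi-provider config with annotate fan-out; the provider section names match this commit's tests, but the catalog paths are hypothetical:

```toml
[openai]
model = "gpt-5.4"

[anthropic]
model = "claude-sonnet"

# Two provider sections are ambiguous, so pick one here
# (or pass --provider on the command line).
[translate]
provider = "openai"

[annotate]
# Omitting annotate.output updates each catalog in place (paths hypothetical).
inputs = ["locales/App.xcstrings", "locales/Settings.xcstrings"]
```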
langcodec-cli/src/ai.rs

Lines changed: 86 additions & 13 deletions
@@ -1,5 +1,6 @@
 use std::sync::Arc;
 
+use crate::config::CliConfig;
 use mentra::{
     BuiltinProvider,
     provider::{self, Provider},
@@ -58,17 +59,27 @@ pub(crate) struct ProviderSetup {
 
 pub(crate) fn resolve_provider(
     cli: Option<&str>,
-    shared_cfg: Option<&str>,
-    legacy_cfg: Option<&str>,
+    config: Option<&CliConfig>,
+    translate_cfg: Option<&str>,
 ) -> Result<ProviderKind, String> {
     if let Some(value) = cli {
         return ProviderKind::parse(value);
     }
-    if let Some(value) = shared_cfg {
+    if let Some(value) = translate_cfg {
         return ProviderKind::parse(value);
     }
-    if let Some(value) = legacy_cfg {
-        return ProviderKind::parse(value);
+    if let Some(config) = config {
+        let configured = config.configured_provider_names();
+        match configured.len() {
+            1 => return ProviderKind::parse(configured[0]),
+            0 => {}
+            _ => {
+                return Err(
+                    "Multiple provider sections are configured; specify --provider or set translate.provider in langcodec.toml"
+                        .to_string(),
+                );
+            }
+        }
     }
 
     let mut available = Vec::new();
@@ -85,28 +96,36 @@ pub(crate) fn resolve_provider(
     match available.len() {
         1 => Ok(available.remove(0)),
         0 => Err(
-            "--provider is required (or set ai.provider in langcodec.toml, or use legacy translate.provider, or configure exactly one provider API key)"
+            "--provider is required (or configure exactly one provider section like [openai] in langcodec.toml, set translate.provider, or configure exactly one provider API key)"
                 .to_string(),
         ),
         _ => Err(
-            "Multiple provider API keys are configured; specify --provider or set ai.provider in langcodec.toml"
+            "Multiple provider API keys are configured; specify --provider or configure a single provider section in langcodec.toml"
                 .to_string(),
         ),
     }
 }
 
 pub(crate) fn resolve_model(
     cli: Option<&str>,
-    shared_cfg: Option<&str>,
-    legacy_cfg: Option<&str>,
+    config: Option<&CliConfig>,
+    provider: &ProviderKind,
+    translate_cfg: Option<&str>,
 ) -> Result<String, String> {
     cli.map(ToOwned::to_owned)
-        .or_else(|| shared_cfg.map(ToOwned::to_owned))
-        .or_else(|| legacy_cfg.map(ToOwned::to_owned))
+        .or_else(|| {
+            config.and_then(|cfg| {
+                cfg.provider_model(provider.display_name())
+                    .map(ToOwned::to_owned)
+            })
+        })
+        .or_else(|| translate_cfg.map(ToOwned::to_owned))
        .or_else(|| std::env::var("MENTRA_MODEL").ok())
        .ok_or_else(|| {
-            "--model is required (or set ai.model in langcodec.toml, or use legacy translate.model, or set MENTRA_MODEL)"
-                .to_string()
+            format!(
+                "--model is required (or set [{}].model in langcodec.toml, set translate.model, or set MENTRA_MODEL)",
+                provider.display_name()
+            )
        })
 }
 
@@ -134,3 +153,57 @@ pub(crate) fn build_provider(kind: &ProviderKind) -> Result<ProviderSetup, Strin
         provider,
     })
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn resolve_provider_uses_single_configured_provider_section() {
+        let config: CliConfig = toml::from_str(
+            r#"
+[openai]
+model = "gpt-5.4"
+"#,
+        )
+        .expect("parse config");
+
+        let provider = resolve_provider(None, Some(&config), None).expect("resolve provider");
+        assert_eq!(provider, ProviderKind::OpenAI);
+    }
+
+    #[test]
+    fn resolve_provider_rejects_multiple_configured_provider_sections() {
+        let config: CliConfig = toml::from_str(
+            r#"
+[openai]
+model = "gpt-5.4"
+
+[anthropic]
+model = "claude-sonnet"
+"#,
+        )
+        .expect("parse config");
+
+        let err = resolve_provider(None, Some(&config), None).unwrap_err();
+        assert!(err.contains("Multiple provider sections are configured"));
+    }
+
+    #[test]
+    fn resolve_model_prefers_selected_provider_section() {
+        let config: CliConfig = toml::from_str(
+            r#"
+[openai]
+model = "gpt-5.4"
+
+[anthropic]
+model = "claude-sonnet"
+"#,
+        )
+        .expect("parse config");
+
+        let model = resolve_model(None, Some(&config), &ProviderKind::Anthropic, None)
+            .expect("resolve model");
+        assert_eq!(model, "claude-sonnet");
+    }
+}
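As the new `resolve_model` reads, the fallback order is: `--model`, then the selected provider's section, then `translate.model`, then `MENTRA_MODEL`. A sketch of the two config-level steps, with placeholder model names:

```toml
[openai]
# Used when the provider resolves to openai and no --model flag is given.
model = "gpt-5.4"

[translate]
# Consulted only if the selected provider's section sets no model.
model = "gpt-4.1-mini"
```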
