2020from http .client import HTTPException
2121from pathlib import Path
2222
23+ from netlab .runtime import require_executable
24+
# Backend names this module recognizes, and the per-backend default models
# used by the constructors below.
SUPPORTED_BACKENDS = (
    "mock",
    "claude-cli",
    "codex-cli",
    "openai",
)
DEFAULT_CLAUDE_MODEL = "opus"
# Empty string means "no --model flag": generate() only appends the model
# argument when self.model is truthy.
DEFAULT_CODEX_MODEL = ""
DEFAULT_OPENAI_MODEL = "gpt-4"
29+
2330
2431class LLMBackend (ABC ):
2532 """Abstract base class for LLM backends."""
@@ -49,22 +56,38 @@ def generate(self, prompt: str, system: str = "") -> str:
4956class ClaudeCLIBackend (LLMBackend ):
5057 """Backend that calls the `claude` CLI tool via subprocess."""
5158
52- def __init__ (self , model : str = "opus" ) -> None :
59+ def __init__ (
60+ self ,
61+ model : str = DEFAULT_CLAUDE_MODEL ,
62+ command : str | None = None ,
63+ ) -> None :
5364 self .model = model
65+ self .command = command
5466
5567 def generate (self , prompt : str , system : str = "" ) -> str :
5668 # Claude Code headless mode uses -p/--print with the prompt as its value.
57- cmd = ["claude" , "-p" , prompt ]
69+ executable = require_executable (
70+ "claude" ,
71+ explicit = self .command ,
72+ env_var = "CLAUDE_BIN" ,
73+ display_name = "Claude CLI" ,
74+ )
75+ cmd = [executable , "-p" , prompt ]
5876 if self .model :
5977 cmd .extend (["--model" , self .model ])
6078 if system :
6179 cmd .extend (["--system-prompt" , system ])
62- result = subprocess .run (
63- cmd ,
64- capture_output = True ,
65- text = True ,
66- check = False ,
67- )
80+ try :
81+ result = subprocess .run (
82+ cmd ,
83+ capture_output = True ,
84+ text = True ,
85+ check = False ,
86+ )
87+ except FileNotFoundError as exc :
88+ raise RuntimeError (
89+ f"Claude CLI executable not found: { executable } "
90+ ) from exc
6891 if result .returncode != 0 :
6992 raise RuntimeError (
7093 f"claude CLI failed (exit { result .returncode } ): { result .stderr } "
@@ -80,8 +103,13 @@ class CodexCLIBackend(LLMBackend):
80103 the ``-o`` (output-last-message) flag.
81104 """
82105
83- def __init__ (self , model : str = "" ) -> None :
106+ def __init__ (
107+ self ,
108+ model : str = DEFAULT_CODEX_MODEL ,
109+ command : str | None = None ,
110+ ) -> None :
84111 self .model = model
112+ self .command = command
85113
86114 def generate (self , prompt : str , system : str = "" ) -> str :
87115 full_prompt = f"{ system } \n \n { prompt } " if system else prompt
@@ -90,8 +118,14 @@ def generate(self, prompt: str, system: str = "") -> str:
90118 output_path = tmp .name
91119
92120 try :
93- cmd = [
121+ executable = require_executable (
94122 "codex" ,
123+ explicit = self .command ,
124+ env_var = "CODEX_BIN" ,
125+ display_name = "Codex CLI" ,
126+ )
127+ cmd = [
128+ executable ,
95129 "exec" ,
96130 "--ephemeral" ,
97131 "--sandbox" ,
@@ -103,12 +137,17 @@ def generate(self, prompt: str, system: str = "") -> str:
103137 if self .model :
104138 cmd .extend (["-m" , self .model ])
105139 cmd .append (full_prompt )
106- result = subprocess .run (
107- cmd ,
108- capture_output = True ,
109- text = True ,
110- check = False ,
111- )
140+ try :
141+ result = subprocess .run (
142+ cmd ,
143+ capture_output = True ,
144+ text = True ,
145+ check = False ,
146+ )
147+ except FileNotFoundError as exc :
148+ raise RuntimeError (
149+ f"Codex CLI executable not found: { executable } "
150+ ) from exc
112151 if result .returncode != 0 :
113152 raise RuntimeError (
114153 f"codex CLI failed (exit { result .returncode } ): { result .stderr } "
@@ -144,7 +183,12 @@ def __init__(
144183 self .timeout = timeout
145184
146185 def generate (self , prompt : str , system : str = "" ) -> str :
147- url = f"{ self .base_url } /v1/chat/completions"
186+ # base_url may or may not include /v1 — normalize
187+ base = self .base_url .rstrip ("/" )
188+ if base .endswith ("/v1" ):
189+ url = f"{ base } /chat/completions"
190+ else :
191+ url = f"{ base } /v1/chat/completions"
148192 messages : list [dict [str , str ]] = []
149193 if system :
150194 messages .append ({"role" : "system" , "content" : system })
0 commit comments