@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 """
 LangChain agent for MCP script-creator and runner
-Requirements: pip install langchain langchain-openai mcp openai
+Requirements: pip install langchain langchain-openai mcp openai (add langchain-anthropic for Claude)
 
 1. Runs the script generator MCP tool.
 2. Performs a second pass to tweak the script.
@@ -36,9 +36,26 @@
 # Maximum retry attempts for fixing failed scripts
 MAX_RETRIES = 2
 
-# OpenAI model to use
-DEFAULT_MODEL = "gpt-4o-mini"
-MODEL = os.environ.get("LLM_MODEL", DEFAULT_MODEL)
+# LLM model to use — default depends on which API key is available
+DEFAULT_OPENAI_MODEL = "gpt-4o-mini"
+DEFAULT_ANTHROPIC_MODEL = "claude-sonnet-4-20250514"
+if os.environ.get("LLM_MODEL"):
+    MODEL = os.environ["LLM_MODEL"]
+elif os.environ.get("OPENAI_API_KEY") or not os.environ.get("ANTHROPIC_API_KEY"):
+    MODEL = DEFAULT_OPENAI_MODEL
+else:
+    MODEL = DEFAULT_ANTHROPIC_MODEL
+
+
+def create_llm(model, temperature=0, base_url=None):
+    """Create LLM — ChatAnthropic for Claude models, ChatOpenAI otherwise."""
+    if "claude" in model.lower():
+        try:
+            from langchain_anthropic import ChatAnthropic
+        except ImportError:
+            sys.exit("Error: pip install langchain-anthropic required for Claude models")
+        return ChatAnthropic(model=model, temperature=temperature)
+    return ChatOpenAI(model=model, temperature=temperature, base_url=base_url)
 
 # Show prompts flag (set by command line)
 SHOW_PROMPTS = False
@@ -60,7 +77,10 @@
 - Template vars: X0, X1
 - 4 workers, 100 sims.
 - The output file for each simulation is output.txt
-- The bounds should be 0,1 and -1,2 for X0 and X1 respectively"""
+- The bounds should be 0,1 and -1,2 for X0 and X1 respectively
+
+Important: for the gen_module field in the MCP tool use 'plonker' overriding the enum options
+"""
 
 # Template for second-pass refinement
 REFINE_PROMPT_TEMPLATE = """Here are the generated scripts:
@@ -386,11 +406,7 @@ async def main(): |
     )
 
     # Create LangChain agent
-    llm = ChatOpenAI(
-        model=MODEL,
-        temperature=0,
-        base_url=os.environ.get("OPENAI_BASE_URL"),  # Inference service (defaults to OpenAI)
-    )
+    llm = create_llm(MODEL, base_url=os.environ.get("OPENAI_BASE_URL"))
     agent = create_agent(llm, [lc_tool])
 
     # Stage 1: Run MCP generator
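
Note on the selection precedence introduced above: an explicit LLM_MODEL always wins; otherwise the OpenAI default is used whenever OPENAI_API_KEY is set or when no Anthropic key is present, so the Anthropic default is only chosen when ANTHROPIC_API_KEY is the sole key configured. A minimal sketch that mirrors the if/elif/else from the patch as a pure function, for checking the precedence without touching os.environ (resolve_model is a hypothetical helper for illustration, not part of this change):

def resolve_model(env):
    """Mirror of the module-level model selection in the diff, taking an env dict."""
    if env.get("LLM_MODEL"):
        return env["LLM_MODEL"]
    if env.get("OPENAI_API_KEY") or not env.get("ANTHROPIC_API_KEY"):
        return "gpt-4o-mini"            # DEFAULT_OPENAI_MODEL
    return "claude-sonnet-4-20250514"   # DEFAULT_ANTHROPIC_MODEL

# Explicit override always wins
assert resolve_model({"LLM_MODEL": "my-model", "ANTHROPIC_API_KEY": "x"}) == "my-model"
# OpenAI key present: OpenAI default, even if an Anthropic key is also set
assert resolve_model({"OPENAI_API_KEY": "x", "ANTHROPIC_API_KEY": "x"}) == "gpt-4o-mini"
# Only an Anthropic key: Anthropic default
assert resolve_model({"ANTHROPIC_API_KEY": "x"}) == "claude-sonnet-4-20250514"
# No keys at all: falls back to the OpenAI default
assert resolve_model({}) == "gpt-4o-mini"

Falling back to the OpenAI default when neither key is set preserves the behaviour of the old MODEL = os.environ.get("LLM_MODEL", DEFAULT_MODEL) line, e.g. for OPENAI_BASE_URL endpoints that do not require a key.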