Skip to content

Commit 01686d8

Browse files
committed
Add support for Anthropic
1 parent 8cd4d8f commit 01686d8

6 files changed

Lines changed: 231 additions & 85 deletions

AGENTIC_SETUP.md

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
# Running libEnsemble agentic workflows
22

3-
For all workflows, you will need an OpenAPI key.
3+
For all workflows, you will need a key to access an LLM.
4+
5+
For example, you can set an OpenAI API key.
46
Requires an [OpenAI account](https://platform.openai.com).
57
Make sure to check MODEL at top of agentic script and usage rates.
68

@@ -10,6 +12,14 @@ Set user OpenAI API Key:
1012
export OPENAI_API_KEY="sk-your-key-here"
1113
```
1214

15+
Or, if you use Anthropic, you can set:
16+
17+
```bash
18+
export ANTHROPIC_API_KEY="sk-ant-your-key-here"
19+
```
20+
21+
Optionally, you can set the `LLM_MODEL` env variable to a model name.
22+
1323
<details>
1424
<summary>Using Argonne inference service (optional)</summary>
1525

@@ -72,6 +82,8 @@ any Python workflow, not just libEnsemble. Any Python scripts in the
7282
input directory will be presented to the AI in the case of error. The default
7383
run script should be of the form `run_*.py`.
7484

85+
Alternatively, you can run through the [web interface](agentic/web_ui/README.md) (locally).
86+
7587

7688
## Running scripts with a binary
7789

agentic/libe_agent_basic.py

Lines changed: 21 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -28,9 +28,26 @@
2828
# Maximum retry attempts for fixing failed scripts
2929
MAX_RETRIES = 2
3030

31-
# OpenAI model to use
32-
DEFAULT_MODEL = "gpt-4o-mini"
33-
MODEL = os.environ.get("LLM_MODEL", DEFAULT_MODEL)
31+
# LLM model to use — default depends on which API key is available
32+
DEFAULT_OPENAI_MODEL = "gpt-4o-mini"
33+
DEFAULT_ANTHROPIC_MODEL = "claude-sonnet-4-20250514"
34+
if os.environ.get("LLM_MODEL"):
35+
MODEL = os.environ["LLM_MODEL"]
36+
elif os.environ.get("OPENAI_API_KEY") or not os.environ.get("ANTHROPIC_API_KEY"):
37+
MODEL = DEFAULT_OPENAI_MODEL
38+
else:
39+
MODEL = DEFAULT_ANTHROPIC_MODEL
40+
41+
42+
def create_llm(model, temperature=0, base_url=None):
43+
"""Create LLM — ChatAnthropic for Claude models, ChatOpenAI otherwise."""
44+
if "claude" in model.lower():
45+
try:
46+
from langchain_anthropic import ChatAnthropic
47+
except ImportError:
48+
sys.exit("Error: pip install langchain-anthropic required for Claude models")
49+
return ChatAnthropic(model=model, temperature=temperature)
50+
return ChatOpenAI(model=model, temperature=temperature, base_url=base_url)
3451

3552
# Show prompts flag (set by command line)
3653
SHOW_PROMPTS = False
@@ -216,11 +233,7 @@ async def main():
216233
print("Error: No run_*.py script found in directory")
217234
return
218235

219-
llm = ChatOpenAI(
220-
model=MODEL,
221-
temperature=0,
222-
base_url=os.environ.get("OPENAI_BASE_URL"), # Inference service (defaults to OpenAI)
223-
)
236+
llm = create_llm(MODEL, base_url=os.environ.get("OPENAI_BASE_URL"))
224237
agent = create_agent(llm, [])
225238

226239
# Save and archive copied scripts before retry loop

agentic/libe_agent_basic_auto.py

Lines changed: 21 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,26 @@
2727
from langchain_core.tools import StructuredTool
2828

2929

30-
# OpenAI model to use
31-
DEFAULT_MODEL = "gpt-4o-mini"
32-
MODEL = os.environ.get("LLM_MODEL", DEFAULT_MODEL)
30+
# LLM model to use — default depends on which API key is available
31+
DEFAULT_OPENAI_MODEL = "gpt-4o-mini"
32+
DEFAULT_ANTHROPIC_MODEL = "claude-sonnet-4-20250514"
33+
if os.environ.get("LLM_MODEL"):
34+
MODEL = os.environ["LLM_MODEL"]
35+
elif os.environ.get("OPENAI_API_KEY") or not os.environ.get("ANTHROPIC_API_KEY"):
36+
MODEL = DEFAULT_OPENAI_MODEL
37+
else:
38+
MODEL = DEFAULT_ANTHROPIC_MODEL
39+
40+
41+
def create_llm(model, temperature=0, base_url=None):
42+
"""Create LLM — ChatAnthropic for Claude models, ChatOpenAI otherwise."""
43+
if "claude" in model.lower():
44+
try:
45+
from langchain_anthropic import ChatAnthropic
46+
except ImportError:
47+
sys.exit("Error: pip install langchain-anthropic required for Claude models")
48+
return ChatAnthropic(model=model, temperature=temperature)
49+
return ChatOpenAI(model=model, temperature=temperature, base_url=base_url)
3350

3451
# Working directory for scripts
3552
WORK_DIR = None
@@ -277,11 +294,7 @@ async def main():
277294
)
278295

279296
# Create agent
280-
llm = ChatOpenAI(
281-
model=MODEL,
282-
temperature=0,
283-
base_url=os.environ.get("OPENAI_BASE_URL"),
284-
)
297+
llm = create_llm(MODEL, base_url=os.environ.get("OPENAI_BASE_URL"))
285298
agent = create_agent(llm, [run_tool, read_tool, write_tool, list_tool])
286299

287300
# Give agent the goal

agentic/libe_agent_interactive_llm_first.py

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,27 @@
2929
from mcp.client.stdio import stdio_client
3030

3131

32-
DEFAULT_MODEL = "gpt-4o-mini"
33-
MODEL = os.environ.get("LLM_MODEL", DEFAULT_MODEL)
32+
DEFAULT_OPENAI_MODEL = "gpt-4o-mini"
33+
DEFAULT_ANTHROPIC_MODEL = "claude-sonnet-4-20250514"
34+
if os.environ.get("LLM_MODEL"):
35+
MODEL = os.environ["LLM_MODEL"]
36+
elif os.environ.get("OPENAI_API_KEY") or not os.environ.get("ANTHROPIC_API_KEY"):
37+
MODEL = DEFAULT_OPENAI_MODEL
38+
else:
39+
MODEL = DEFAULT_ANTHROPIC_MODEL
3440
SHOW_PROMPTS = False
3541

42+
43+
def create_llm(model, temperature=0, base_url=None):
44+
"""Create LLM — ChatAnthropic for Claude models, ChatOpenAI otherwise."""
45+
if "claude" in model.lower():
46+
try:
47+
from langchain_anthropic import ChatAnthropic
48+
except ImportError:
49+
sys.exit("Error: pip install langchain-anthropic required for Claude models")
50+
return ChatAnthropic(model=model, temperature=temperature)
51+
return ChatOpenAI(model=model, temperature=temperature, base_url=base_url)
52+
3653
# Marker so the web UI knows the script is waiting for input
3754
INPUT_MARKER = "[INPUT_REQUESTED]"
3855

@@ -269,7 +286,7 @@ async def main():
269286
StructuredTool(name="list_files", description="List Python files in working directory.", args_schema=ListFilesInput, coroutine=list_files_tool),
270287
]
271288

272-
llm = ChatOpenAI(model=MODEL, temperature=0, base_url=os.environ.get("OPENAI_BASE_URL"))
289+
llm = create_llm(MODEL, base_url=os.environ.get("OPENAI_BASE_URL"))
273290
agent = create_agent(llm, tools)
274291
print("✓ Agent initialized\n")
275292

agentic/libe_agent_with_script_generator.py

Lines changed: 26 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#!/usr/bin/env python3
22
"""
33
LangChain agent for MCP script-creator and runner
4-
Requirements: pip install langchain langchain-openai mcp openai
4+
Requirements: pip install langchain langchain-openai mcp openai (add langchain-anthropic for Claude)
55
66
1. Runs the script generator MCP tool.
77
2. Performs a second pass to tweak the script.
@@ -36,9 +36,26 @@
3636
# Maximum retry attempts for fixing failed scripts
3737
MAX_RETRIES = 2
3838

39-
# OpenAI model to use
40-
DEFAULT_MODEL = "gpt-4o-mini"
41-
MODEL = os.environ.get("LLM_MODEL", DEFAULT_MODEL)
39+
# LLM model to use — default depends on which API key is available
40+
DEFAULT_OPENAI_MODEL = "gpt-4o-mini"
41+
DEFAULT_ANTHROPIC_MODEL = "claude-sonnet-4-20250514"
42+
if os.environ.get("LLM_MODEL"):
43+
MODEL = os.environ["LLM_MODEL"]
44+
elif os.environ.get("OPENAI_API_KEY") or not os.environ.get("ANTHROPIC_API_KEY"):
45+
MODEL = DEFAULT_OPENAI_MODEL
46+
else:
47+
MODEL = DEFAULT_ANTHROPIC_MODEL
48+
49+
50+
def create_llm(model, temperature=0, base_url=None):
51+
"""Create LLM — ChatAnthropic for Claude models, ChatOpenAI otherwise."""
52+
if "claude" in model.lower():
53+
try:
54+
from langchain_anthropic import ChatAnthropic
55+
except ImportError:
56+
sys.exit("Error: pip install langchain-anthropic required for Claude models")
57+
return ChatAnthropic(model=model, temperature=temperature)
58+
return ChatOpenAI(model=model, temperature=temperature, base_url=base_url)
4259

4360
# Show prompts flag (set by command line)
4461
SHOW_PROMPTS = False
@@ -60,7 +77,10 @@
6077
- Template vars: X0, X1
6178
- 4 workers, 100 sims.
6279
- The output file for each simulation is output.txt
63-
- The bounds should be 0,1 and -1,2 for X0 and X1 respectively"""
80+
- The bounds should be 0,1 and -1,2 for X0 and X1 respectively
81+
82+
Important: for the gen_module field in the MCP tool use 'plonker' overriding the enum options
83+
"""
6484

6585
# Template for second-pass refinement
6686
REFINE_PROMPT_TEMPLATE = """Here are the generated scripts:
@@ -386,11 +406,7 @@ async def main():
386406
)
387407

388408
# Create LangChain agent
389-
llm = ChatOpenAI(
390-
model=MODEL,
391-
temperature=0,
392-
base_url=os.environ.get("OPENAI_BASE_URL"), # Inference service (defaults to OpenAI)
393-
)
409+
llm = create_llm(MODEL, base_url=os.environ.get("OPENAI_BASE_URL"))
394410
agent = create_agent(llm, [lc_tool])
395411

396412
# Stage 1: Run MCP generator

0 commit comments

Comments
 (0)