Skip to content

Commit 809a502

Browse files
committed
Make LMStudio provider work
1 parent 54b532a commit 809a502

2 files changed

Lines changed: 9 additions & 3 deletions

File tree

llms_wrapper/config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ def read_config_file(filepath: str, update: bool = True) -> dict:
100100
if not 'llm' in llm:
101101
raise ValueError(f"Error: Missing 'llm' field in llm config")
102102
llm = llm["llm"]
103-
if not re.match(r"^[a-zA-Z0-9]+/.+$", llm):
103+
if not re.match(r"^[a-zA-Z0-9_-]+/.+$", llm):
104104
raise ValueError(f"Error: 'llm' field must be in the format 'provider/model' in line: {llm}")
105105
# add known additional configuration fields: these can get specified using a name like e.g. cost_per_prompt_token
106106
# but get stored in the config as _cost_per_prompt_token to avoid passing them to the LLM.
@@ -196,4 +196,4 @@ def update_llm_config(config: dict):
196196
if llm["alias"] in aliases:
197197
raise ValueError(f"Error: Duplicate alias {llm['alias']} in LLM list")
198198
aliases.add(llm["alias"])
199-
return config
199+
return config

test-chatbot-config-ollama.hjson

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,12 @@
44
[ {
55
llm: "ollama/gpt-oss:20b"
66
api_base: "http://localhost:11434"
7+
alias: local1
8+
}
9+
{
10+
llm: "openai/gpt-oss-20b"
11+
api_base: "http://localhost:1234/v1"
12+
alias: local2
713
}
814
{
915
llm: openai/gpt-4.1
@@ -102,5 +108,5 @@
102108
temperature: 0
103109
}
104110
]
105-
use_llm: gemini25pro
111+
use_llm: local2
106112
}

0 commit comments

Comments (0)