Skip to content

Commit 9434946

Browse files
authored
Merge pull request #18 from techartdev/papurha/safe-fixes-and-reworks
Fix model selection, clean up dead schema, and rework Assist agent routing
2 parents 7050441 + e17bd0a commit 9434946

11 files changed

Lines changed: 121 additions & 138 deletions

File tree

README.md

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ OpenClaw is a Home Assistant custom integration that connects your HA instance t
2828
- `openclaw.send_message`
2929
- `openclaw.clear_history`
3030
- `openclaw.invoke_tool`
31+
- **Integration options** for model selection and voice-specific routing
3132
- **Event**
3233
- `openclaw_message_received`
3334
- `openclaw_tool_invoked`
@@ -192,6 +193,13 @@ When enabled, OpenClaw tool-call responses can execute Home Assistant services.
192193
- **Wake word enabled**
193194
- **Wake word** (default: `hey openclaw`)
194195
- **Voice input provider** (`browser` or `assist_stt`)
196+
- **Voice agent ID** (optional)
197+
198+
### Model selection
199+
200+
- **Active Model** select entity controls the model override used for chat-card and Assist requests.
201+
- If no model is selected, the gateway default is used.
202+
- Assist conversation IDs are conservatively namespaced by agent ID so different agents do not accidentally share the same conversation context.
195203

196204
### Voice provider usage
197205

@@ -227,11 +235,13 @@ If voice is unreliable in Brave, use Chrome/Edge for card voice input or continu
227235

228236
Send a message to OpenClaw.
229237

238+
> Note: file attachments are not currently supported by this service. The old `attachments` field was removed because it was accepted by the schema but never sent to the gateway.
239+
230240
Fields:
231241

232242
- `message` (required)
233243
- `session_id` (optional)
234-
- `attachments` (optional)
244+
- `agent_id` (optional)
235245

236246
Example:
237247

@@ -313,6 +323,8 @@ action:
313323

314324
Fired when `openclaw.invoke_tool` completes.
315325

326+
The integration also exposes native Event entities for both message-received and tool-invoked events so they can be selected directly in the automation UI.
327+
316328
Event data includes:
317329

318330
- `tool`

custom_components/openclaw/__init__.py

Lines changed: 8 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@
3131
from .api import OpenClawApiClient, OpenClawApiError
3232
from .const import (
3333
ATTR_AGENT_ID,
34-
ATTR_ATTACHMENTS,
3534
ATTR_MESSAGE,
3635
ATTR_MODEL,
3736
ATTR_OK,
@@ -90,6 +89,7 @@
9089
)
9190
from .coordinator import OpenClawCoordinator
9291
from .exposure import apply_context_policy, build_exposed_entities_context
92+
from .helpers import extract_text_recursive
9393

9494
_LOGGER = logging.getLogger(__name__)
9595

@@ -98,6 +98,7 @@
9898
_VOICE_REQUEST_HEADERS = {
9999
"x-openclaw-source": "voice",
100100
"x-ha-voice": "true",
101+
"x-openclaw-message-channel": "voice",
101102
}
102103

103104
# Path to the chat card JS inside the integration package (custom_components/openclaw/www/)
@@ -106,7 +107,7 @@
106107
# URL at which the card JS is served (registered via register_static_path)
107108
_CARD_STATIC_URL = f"/openclaw/{_CARD_FILENAME}"
108109
# Versioned URL used for Lovelace resource registration to avoid stale browser cache
109-
_CARD_URL = f"{_CARD_STATIC_URL}?v=0.1.60"
110+
_CARD_URL = f"{_CARD_STATIC_URL}?v=0.1.61"
110111

111112
OpenClawConfigEntry = ConfigEntry
112113

@@ -117,7 +118,6 @@
117118
vol.Required(ATTR_MESSAGE): cv.string,
118119
vol.Optional(ATTR_SOURCE): cv.string,
119120
vol.Optional(ATTR_SESSION_ID): cv.string,
120-
vol.Optional(ATTR_ATTACHMENTS): vol.All(cv.ensure_list, [cv.string]),
121121
vol.Optional(ATTR_AGENT_ID): cv.string,
122122
}
123123
)
@@ -447,13 +447,16 @@ async def handle_send_message(call: ServiceCall) -> None:
447447
)
448448
system_prompt = apply_context_policy(raw_context, max_chars, strategy)
449449

450+
active_model = _normalize_optional_text(options.get("active_model"))
451+
450452
_append_chat_history(hass, session_id, "user", message)
451453

452454
response = await client.async_send_message(
453455
message=message,
454456
session_id=session_id,
455457
system_prompt=system_prompt,
456458
agent_id=resolved_agent_id,
459+
model=active_model,
457460
extra_headers=extra_headers,
458461
)
459462

@@ -469,6 +472,7 @@ async def handle_send_message(call: ServiceCall) -> None:
469472
session_id=session_id,
470473
system_prompt=system_prompt,
471474
agent_id=resolved_agent_id,
475+
model=active_model,
472476
extra_headers=extra_headers,
473477
)
474478

@@ -635,53 +639,6 @@ def _get_entry_options(hass: HomeAssistant, entry_data: dict[str, Any]) -> dict[
635639
return latest_entry.options if latest_entry else {}
636640

637641

638-
def _extract_text_recursive(value: Any, depth: int = 0) -> str | None:
639-
"""Recursively extract assistant text from nested response payloads."""
640-
if depth > 8:
641-
return None
642-
643-
if isinstance(value, str):
644-
text = value.strip()
645-
return text or None
646-
647-
if isinstance(value, list):
648-
parts: list[str] = []
649-
for item in value:
650-
extracted = _extract_text_recursive(item, depth + 1)
651-
if extracted:
652-
parts.append(extracted)
653-
if parts:
654-
return "\n".join(parts)
655-
return None
656-
657-
if isinstance(value, dict):
658-
priority_keys = (
659-
"output_text",
660-
"text",
661-
"content",
662-
"message",
663-
"response",
664-
"answer",
665-
"choices",
666-
"output",
667-
"delta",
668-
)
669-
670-
for key in priority_keys:
671-
if key not in value:
672-
continue
673-
extracted = _extract_text_recursive(value.get(key), depth + 1)
674-
if extracted:
675-
return extracted
676-
677-
for nested_value in value.values():
678-
extracted = _extract_text_recursive(nested_value, depth + 1)
679-
if extracted:
680-
return extracted
681-
682-
return None
683-
684-
685642
def _summarize_tool_result(value: Any, max_len: int = 240) -> str | None:
686643
"""Return compact string preview of tool result payload."""
687644
if value is None:
@@ -703,7 +660,7 @@ def _summarize_tool_result(value: Any, max_len: int = 240) -> str | None:
703660

704661
def _extract_assistant_message(response: dict[str, Any]) -> str | None:
705662
"""Extract assistant text from modern/legacy OpenAI-compatible responses."""
706-
return _extract_text_recursive(response)
663+
return extract_text_recursive(response)
707664

708665

709666
def _extract_tool_calls(response: dict[str, Any]) -> list[dict[str, Any]]:

custom_components/openclaw/api.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,11 @@ def _headers(
104104
async def _get_session(self) -> aiohttp.ClientSession:
105105
"""Get or create an aiohttp session."""
106106
if self._session is None or self._session.closed:
107+
if self._session is not None and self._session.closed:
108+
_LOGGER.warning(
109+
"Primary aiohttp session unavailable — creating fallback session. "
110+
"This may bypass HA connection management"
111+
)
107112
self._session = aiohttp.ClientSession()
108113
return self._session
109114

custom_components/openclaw/conversation.py

Lines changed: 35 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,13 @@
2424
ATTR_SESSION_ID,
2525
ATTR_TIMESTAMP,
2626
CONF_ASSIST_SESSION_ID,
27+
CONF_AGENT_ID,
2728
CONF_CONTEXT_MAX_CHARS,
2829
CONF_CONTEXT_STRATEGY,
2930
CONF_INCLUDE_EXPOSED_CONTEXT,
3031
CONF_VOICE_AGENT_ID,
3132
DEFAULT_ASSIST_SESSION_ID,
33+
DEFAULT_AGENT_ID,
3234
DEFAULT_CONTEXT_MAX_CHARS,
3335
DEFAULT_CONTEXT_STRATEGY,
3436
DEFAULT_INCLUDE_EXPOSED_CONTEXT,
@@ -38,6 +40,7 @@
3840
)
3941
from .coordinator import OpenClawCoordinator
4042
from .exposure import apply_context_policy, build_exposed_entities_context
43+
from .helpers import extract_text_recursive
4144

4245
_LOGGER = logging.getLogger(__name__)
4346

@@ -117,12 +120,20 @@ async def async_process(
117120
coordinator: OpenClawCoordinator = entry_data["coordinator"]
118121

119122
message = user_input.text
120-
conversation_id = self._resolve_conversation_id(user_input)
121123
assistant_id = "conversation"
122124
options = self.entry.options
123125
voice_agent_id = self._normalize_optional_text(
124126
options.get(CONF_VOICE_AGENT_ID)
125127
)
128+
configured_agent_id = self._normalize_optional_text(
129+
options.get(
130+
CONF_AGENT_ID,
131+
self.entry.data.get(CONF_AGENT_ID, DEFAULT_AGENT_ID),
132+
)
133+
)
134+
resolved_agent_id = voice_agent_id or configured_agent_id
135+
conversation_id = self._resolve_conversation_id(user_input, resolved_agent_id)
136+
active_model = self._normalize_optional_text(options.get("active_model"))
126137
include_context = options.get(
127138
CONF_INCLUDE_EXPOSED_CONTEXT,
128139
DEFAULT_INCLUDE_EXPOSED_CONTEXT,
@@ -149,8 +160,9 @@ async def async_process(
149160
client,
150161
message,
151162
conversation_id,
152-
voice_agent_id,
163+
resolved_agent_id,
153164
system_prompt,
165+
active_model,
154166
)
155167
except OpenClawApiError as err:
156168
_LOGGER.error("OpenClaw conversation error: %s", err)
@@ -165,8 +177,9 @@ async def async_process(
165177
client,
166178
message,
167179
conversation_id,
168-
voice_agent_id,
180+
resolved_agent_id,
169181
system_prompt,
182+
active_model,
170183
)
171184
except OpenClawApiError as retry_err:
172185
return self._error_result(
@@ -205,8 +218,12 @@ async def async_process(
205218
continue_conversation=self._should_continue(full_response),
206219
)
207220

208-
def _resolve_conversation_id(self, user_input: conversation.ConversationInput) -> str:
209-
"""Return conversation id from HA or a stable Assist fallback session key."""
221+
def _resolve_conversation_id(
222+
self,
223+
user_input: conversation.ConversationInput,
224+
agent_id: str | None,
225+
) -> str:
226+
"""Return conversation id from HA with conservative agent namespacing."""
210227
configured_session_id = self._normalize_optional_text(
211228
self.entry.options.get(
212229
CONF_ASSIST_SESSION_ID,
@@ -216,19 +233,25 @@ def _resolve_conversation_id(self, user_input: conversation.ConversationInput) -
216233
if configured_session_id:
217234
return configured_session_id
218235

236+
agent_suffix = self._normalize_optional_text(agent_id)
237+
219238
if user_input.conversation_id:
239+
if agent_suffix:
240+
return f"{user_input.conversation_id}:{agent_suffix}"
220241
return user_input.conversation_id
221242

222243
context = getattr(user_input, "context", None)
223244
user_id = getattr(context, "user_id", None)
224245
if user_id:
225-
return f"assist_user_{user_id}"
246+
base_id = f"assist_user_{user_id}"
247+
return f"{base_id}:{agent_suffix}" if agent_suffix else base_id
226248

227249
device_id = getattr(user_input, "device_id", None)
228250
if device_id:
229-
return f"assist_device_{device_id}"
251+
base_id = f"assist_device_{device_id}"
252+
return f"{base_id}:{agent_suffix}" if agent_suffix else base_id
230253

231-
return "assist_default"
254+
return f"assist_default:{agent_suffix}" if agent_suffix else "assist_default"
232255

233256
def _normalize_optional_text(self, value: Any) -> str | None:
234257
"""Return a stripped string or None for blank values."""
@@ -244,13 +267,14 @@ async def _get_response(
244267
conversation_id: str,
245268
agent_id: str | None = None,
246269
system_prompt: str | None = None,
270+
model: str | None = None,
247271
) -> str:
248272
"""Get a response from OpenClaw, trying streaming first."""
249-
# Try streaming (lower TTFB for voice pipeline)
250273
full_response = ""
251274
async for chunk in client.async_stream_message(
252275
message=message,
253276
session_id=conversation_id,
277+
model=model,
254278
system_prompt=system_prompt,
255279
agent_id=agent_id,
256280
extra_headers=_VOICE_REQUEST_HEADERS,
@@ -260,62 +284,15 @@ async def _get_response(
260284
if full_response:
261285
return full_response
262286

263-
# Fallback to non-streaming
264287
response = await client.async_send_message(
265288
message=message,
266289
session_id=conversation_id,
290+
model=model,
267291
system_prompt=system_prompt,
268292
agent_id=agent_id,
269293
extra_headers=_VOICE_REQUEST_HEADERS,
270294
)
271-
extracted = self._extract_text_recursive(response)
272-
return extracted or ""
273-
274-
def _extract_text_recursive(self, value: Any, depth: int = 0) -> str | None:
275-
"""Recursively extract assistant text from nested response payloads."""
276-
if depth > 8:
277-
return None
278-
279-
if isinstance(value, str):
280-
text = value.strip()
281-
return text or None
282-
283-
if isinstance(value, list):
284-
parts: list[str] = []
285-
for item in value:
286-
extracted = self._extract_text_recursive(item, depth + 1)
287-
if extracted:
288-
parts.append(extracted)
289-
if parts:
290-
return "\n".join(parts)
291-
return None
292-
293-
if isinstance(value, dict):
294-
priority_keys = (
295-
"output_text",
296-
"text",
297-
"content",
298-
"message",
299-
"response",
300-
"answer",
301-
"choices",
302-
"output",
303-
"delta",
304-
)
305-
306-
for key in priority_keys:
307-
if key not in value:
308-
continue
309-
extracted = self._extract_text_recursive(value.get(key), depth + 1)
310-
if extracted:
311-
return extracted
312-
313-
for nested_value in value.values():
314-
extracted = self._extract_text_recursive(nested_value, depth + 1)
315-
if extracted:
316-
return extracted
317-
318-
return None
295+
return extract_text_recursive(response) or ""
319296

320297
@staticmethod
321298
def _should_continue(response: str) -> bool:

0 commit comments

Comments
 (0)