Skip to content

Commit e1a6f75

Browse files
committed
Fix splunklib/ai/README.md
1 parent 8741728 commit e1a6f75

1 file changed

Lines changed: 23 additions & 19 deletions

File tree

splunklib/ai/README.md

Lines changed: 23 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ async with Agent(model=model) as agent: ....
114114

115115
## Messages
116116

117-
`Agent.invoke` processes a list of `BaseMessage` objects and returns a new list reflecting both prior messages and the LLM’s outputs.
117+
`Agent.invoke` processes a list of `BaseMessage` objects and returns an `AgentResponse` containing the updated message history and optional structured output.
118118

119119
`BaseMessage` is a base class, that is extended by:
120120

@@ -144,7 +144,7 @@ async with Agent(
144144
model=model,
145145
system_prompt="Your name is Stefan",
146146
service=service,
147-
tool_settings=ToolSettings(local=True),
147+
tool_settings=ToolSettings(local=True, remote=None),
148148
) as agent: ...
149149
```
150150

@@ -212,9 +212,10 @@ async with Agent(
212212
system_prompt="...",
213213
tool_settings=ToolSettings(
214214
# local=True, # enable all local tools
215-
local=RemoteToolSettings(
215+
local=LocalToolSettings(
216216
allowlist=ToolAllowlist(names=["tool1"], tags=["tag1"])
217-
)
217+
),
218+
remote=None,
218219
),
219220
) as agent: ...
220221
```
@@ -278,7 +279,7 @@ These logs are forwarded to the `logger` passed to the `Agent` constructor.
278279

279280
### Tool filtering
280281

281-
Remote tools must intentionally allowlisted before they are made available to the LLM.
282+
Remote tools must be intentionally allowlisted before they are made available to the LLM.
282283

283284
```py
284285
from splunklib.ai import Agent, OpenAIModel
@@ -308,13 +309,14 @@ tool_settings=ToolSettings(
308309
local=LocalToolSettings(
309310
allowlist=ToolAllowlist(custom_predicate=lambda tool: tool.name.startswith("my_"))
310311
),
312+
remote=None,
311313
)
312314
```
313315

314316
As a shorthand, pass `local=True` to load all local tools with no filtering:
315317

316318
```py
317-
tool_settings=ToolSettings(local=True)
319+
tool_settings=ToolSettings(local=True, remote=None)
318320
```
319321

320322
## Conversation stores
@@ -423,7 +425,8 @@ async with (
423425
name="debugging_agent",
424426
description="Agent, that provided with logs will analyze and debug complex issues",
425427
tool_settings=ToolSettings(
426-
local=LocalToolSettings(allowlist=ToolAllowlist(tags=["debugging"]))
428+
local=LocalToolSettings(allowlist=ToolAllowlist(tags=["debugging"])),
429+
remote=None,
427430
),
428431
) as debugging_agent,
429432
Agent(
@@ -436,7 +439,8 @@ async with (
436439
name="log_analyzer_agent",
437440
description="Agent, that provided with a problem details will return logs, that could be related to the problem",
438441
tool_settings=ToolSettings(
439-
local=LocalToolSettings(allowlist=ToolAllowlist(tags=["spl"]))
442+
local=LocalToolSettings(allowlist=ToolAllowlist(tags=["spl"])),
443+
remote=None,
440444
),
441445
) as log_analyzer_agent,
442446
):
@@ -470,7 +474,7 @@ The input and output schemas are defined as `pydantic.BaseModel` classes and pas
470474

471475
A subagent can be given its own `conversation_store`, enabling multi-turn conversations between
472476
the supervisor and the subagent. When a subagent has a store, the supervisor can resume prior
473-
conversations with an subagent.
477+
conversations with a subagent.
474478

475479
```py
476480
from splunklib.ai import Agent, OpenAIModel
@@ -563,7 +567,7 @@ structured output based on the capabilities of the underlying model:
563567

564568
- **Tool strategy** - used as a fallback when the model does not natively support structured outputs.
565569
The LLM passes the structured output into a tool call, according to the tool input schema. The
566-
tool schema correspponds to the `output_schema` pydantic model as passed to the `Agent` constructor.
570+
tool schema corresponds to the `output_schema` pydantic model as passed to the `Agent` constructor.
567571
In that case the returned `AIMessage` will contain the `structured_output_calls` field populated
568572
and a `StructuredOutputMessage` will be appended to the message list, since each tool call must
569573
have a corresponding tool response.
@@ -584,14 +588,15 @@ Output schema generation can fail for various reasons:
584588
```py
585589
class Output(BaseModel):
586590
min_score: float
587-
max_score: float = Field(descripiton="max_score must be less or equal than min_score")
591+
max_score: float = Field(description="max_score must be greater than min_score")
588592

589593
@model_validator(mode="after")
590594
def max_must_exceed_min(self) -> "Output":
591595
if self.max_score <= self.min_score:
592596
raise ValueError("max_score must be greater than min_score")
593597
return self
594598
```
599+
595600
- In case of **tool strategy** if the LLM model returned multiple structured output tool calls.
596601

597602
By default the output schema generation is re-tried, until the LLM generates a valid output.
@@ -667,7 +672,9 @@ Class-based middleware:
667672

668673
```py
669674
from typing import Any, override
675+
from splunklib.ai.messages import SubagentTextResult, ToolResult
670676
from splunklib.ai.middleware import (
677+
AgentMiddleware,
671678
AgentMiddlewareHandler,
672679
AgentRequest,
673680
ModelMiddlewareHandler,
@@ -712,17 +719,15 @@ class ExampleMiddleware(AgentMiddleware):
712719
self, request: ToolRequest, handler: ToolMiddlewareHandler
713720
) -> ToolResponse:
714721
if request.call.name == "temperature":
715-
return ToolResponse(content="25.0")
722+
return ToolResponse(result=ToolResult(content="25.0", structured_content=None))
716723
return await handler(request)
717724

718725
@override
719726
async def subagent_middleware(
720727
self, request: SubagentRequest, handler: SubagentMiddlewareHandler
721728
) -> SubagentResponse:
722729
if request.call.name == "SummaryAgent":
723-
return SubagentResponse(
724-
content="Executive summary: no critical incidents detected."
725-
)
730+
return SubagentResponse(result=SubagentTextResult(content="Executive summary: no critical incidents detected."))
726731
return await handler(request)
727732
```
728733

@@ -789,13 +794,14 @@ async def mock_temperature(
789794
request: ToolRequest, handler: ToolMiddlewareHandler
790795
) -> ToolResponse:
791796
if request.call.name == "temperature":
792-
return ToolResponse(content="25.0")
797+
return ToolResponse(result=ToolResult(content="25.0", structured_content=None))
793798
return await handler(request)
794799
```
795800

796801
Example subagent middleware:
797802

798803
```py
804+
from splunklib.ai.messages import SubagentTextResult
799805
from splunklib.ai.middleware import (
800806
subagent_middleware,
801807
SubagentMiddlewareHandler,
@@ -809,9 +815,7 @@ async def mock_subagent(
809815
request: SubagentRequest, handler: SubagentMiddlewareHandler
810816
) -> SubagentResponse:
811817
if request.call.name == "SummaryAgent":
812-
return SubagentResponse(
813-
content="Executive summary: no critical incidents detected."
814-
)
818+
return SubagentResponse(result=SubagentTextResult(content="Executive summary: no critical incidents detected."))
815819
return await handler(request)
816820
```
817821

0 commit comments

Comments (0)