| title | Python Client |
|---|---|
| description | Connect to agent runtime in sandbox from Python. |
The Python client is the control plane for RuntimeUse. It connects to the sandbox runtime, sends the invocation, and turns runtime messages into a single QueryResult.
pip install runtimeuse-client
import asyncio
from runtimeuse_client import QueryOptions, RuntimeUseClient, TextResult
async def main() -> None:
client = RuntimeUseClient(ws_url="ws://localhost:8080")
result = await client.query(
prompt="What is 2 + 2",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
),
)
assert isinstance(result.data, TextResult)
print(result.data.text)
print(result.metadata)
asyncio.run(main())

query() returns a QueryResult with:
data: either TextResult (.text) or StructuredOutputResult (.structured_output)
metadata: execution metadata returned by the runtime (includes token usage when available)
Pass output_format_json_schema_str when your application needs machine-readable output instead of free-form text. The result will be a StructuredOutputResult.
import json
from pydantic import BaseModel
from runtimeuse_client import StructuredOutputResult
class RepoStats(BaseModel):
file_count: int
char_count: int
result = await client.query(
prompt="Inspect the repository and return the total file count and character count as JSON.",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
output_format_json_schema_str=json.dumps(
{
"type": "json_schema",
"schema": RepoStats.model_json_schema(),
}
),
),
)
assert isinstance(result.data, StructuredOutputResult)
stats = RepoStats.model_validate(result.data.structured_output)
print(stats)

Use pre_agent_downloadables to fetch a repository, zip archive, or any URL into the sandbox before the agent runs. This is the primary way to give the agent access to a codebase or dataset.
from runtimeuse_client import RuntimeEnvironmentDownloadableInterface
result = await client.query(
prompt="Summarize the contents of this repository and list your favorite file.",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
pre_agent_downloadables=[
RuntimeEnvironmentDownloadableInterface(
download_url="https://github.com/openai/codex/archive/refs/heads/main.zip",
working_dir="/runtimeuse",
)
],
),
)

The runtime downloads and extracts the file before handing control to the agent.
When the runtime requests an artifact upload, return a presigned URL and content type from on_artifact_upload_request. Set artifacts_dir to tell the runtime which sandbox directory contains the files to upload - both options must be provided together.
from runtimeuse_client import ArtifactUploadResult
async def on_artifact_upload_request(request) -> ArtifactUploadResult:
presigned_url = await create_presigned_url(request.filename)
return ArtifactUploadResult(
presigned_url=presigned_url,
content_type="application/octet-stream",
)
result = await client.query(
prompt="Generate a report and save it as report.txt.",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
artifacts_dir="/runtimeuse/output",
on_artifact_upload_request=on_artifact_upload_request,
),
)

Use on_assistant_message when you want intermediate progress while the run is still happening.
async def on_assistant_message(msg) -> None:
for block in msg.text_blocks:
print(f"[assistant] {block}")
result = await client.query(
prompt="Inspect this repository.",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
on_assistant_message=on_assistant_message,
),
)

Use execute_commands() when you only need to run shell commands in the sandbox -- no agent invocation, no prompt. The method returns per-command exit codes and raises AgentRuntimeError if any command fails.
from runtimeuse_client import (
CommandInterface,
ExecuteCommandsOptions,
RuntimeUseClient,
)
client = RuntimeUseClient(ws_url="ws://localhost:8080")
result = await client.execute_commands(
commands=[
CommandInterface(command="mkdir -p /app/output"),
CommandInterface(command="echo 'sandbox is ready' > /app/output/status.txt"),
CommandInterface(command="cat /app/output/status.txt"),
],
options=ExecuteCommandsOptions(
on_assistant_message=on_assistant_message, # streams stdout/stderr
),
)
for item in result.results:
print(f"{item.command} -> exit {item.exit_code}")

execute_commands() supports the same callbacks and options as query(): streaming via on_assistant_message, artifact uploads, cancellation, timeout, and secrets_to_redact. Use pre_execution_downloadables to fetch files into the sandbox before the commands run.
Call client.abort() from another coroutine to cancel an in-flight query. The client sends a cancel message to the runtime and query() raises CancelledException.
import asyncio
from runtimeuse_client import CancelledException
async def cancel_soon(client: RuntimeUseClient) -> None:
await asyncio.sleep(5)
client.abort()
try:
asyncio.create_task(cancel_soon(client))
await client.query(prompt="Do the thing.", options=options)
except CancelledException:
print("Run was cancelled")

Use timeout (in seconds) to limit how long a query can run. If the limit is exceeded, query() raises TimeoutError.
result = await client.query(
prompt="Do the thing.",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
timeout=120,
),
)

Pass secrets_to_redact to strip sensitive strings from any output or logs that leave the sandbox.
result = await client.query(
prompt="Check the API status.",
options=QueryOptions(
system_prompt="You are a helpful assistant.",
model="gpt-4.1",
secrets_to_redact=["sk-live-abc123", "my_db_password"],
),
)

query() raises AgentRuntimeError if the runtime sends back an error. The exception carries .error (the error message) and .metadata.
from runtimeuse_client import AgentRuntimeError
try:
result = await client.query(prompt="Do the thing.", options=options)
except AgentRuntimeError as e:
print(f"Runtime error: {e.error}")
print(f"Metadata: {e.metadata}")