diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b1e32a40..dd0e630e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,12 +1,14 @@
name: CI
on:
push:
- branches-ignore:
- - 'generated'
- - 'codegen/**'
- - 'integrated/**'
- - 'stl-preview-head/**'
- - 'stl-preview-base/**'
+ branches:
+ - '**'
+ - '!integrated/**'
+ - '!stl-preview-head/**'
+ - '!stl-preview-base/**'
+ - '!generated'
+ - '!codegen/**'
+ - 'codegen/stl/**'
pull_request:
branches-ignore:
- 'stl-preview-head/**'
@@ -17,9 +19,9 @@ jobs:
timeout-minutes: 10
name: lint
runs-on: ${{ github.repository == 'stainless-sdks/writer-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install Rye
run: |
@@ -36,7 +38,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
timeout-minutes: 10
name: build
permissions:
@@ -44,7 +46,7 @@ jobs:
id-token: write
runs-on: ${{ github.repository == 'stainless-sdks/writer-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install Rye
run: |
@@ -61,14 +63,18 @@ jobs:
run: rye build
- name: Get GitHub OIDC Token
- if: github.repository == 'stainless-sdks/writer-python'
+ if: |-
+ github.repository == 'stainless-sdks/writer-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
id: github-oidc
- uses: actions/github-script@v6
+ uses: actions/github-script@v8
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
- if: github.repository == 'stainless-sdks/writer-python'
+ if: |-
+ github.repository == 'stainless-sdks/writer-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
@@ -81,7 +87,7 @@ jobs:
runs-on: ${{ github.repository == 'stainless-sdks/writer-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install Rye
run: |
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 85424099..a150e406 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install Rye
run: |
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index 43d314d3..4a6bb93d 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -12,7 +12,7 @@ jobs:
if: github.repository == 'writer/writer-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Check release environment
run: |
diff --git a/.gitignore b/.gitignore
index 95ceb189..3824f4c4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
.prism.log
+.stdy.log
_dev
__pycache__
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b44b2870..4dedeaeb 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "2.4.0"
+ ".": "2.5.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index a774d25b..18b117f4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 33
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-ea6ec4b34f6b7fdecc564f59b2e31482eee05830bf8dc1f389461b158de1548e.yml
-openapi_spec_hash: ea89c1faed473908be2740efe6da255f
-config_hash: 247c2ce23a36ef7446d356308329c87b
+configured_endpoints: 30
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-ea6b4de3976794a02ea8fc01669d901cd7b159ba0d598cc9653e01c987a2f806.yml
+openapi_spec_hash: 4d4a9ba232d19a6180e6d4a7d5566103
+config_hash: 8701b1a467238f1afdeceeb7feb1adc6
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e3c84d01..605d1d95 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,64 @@
# Changelog
+## 2.5.0 (2026-04-10)
+
+Full Changelog: [v2.4.0...v2.5.0](https://github.com/writer/writer-python/compare/v2.4.0...v2.5.0)
+
+### Features
+
+* **api:** Deprecate AI Detection, Medical Comprehend, and Context-Aware Text Splitting ([c6bee06](https://github.com/writer/writer-python/commit/c6bee0669b39c1563d313929d9461887cf88258a))
+* **client:** add custom JSON encoder for extended type support ([a855555](https://github.com/writer/writer-python/commit/a85555525a3f1188ee8ea9627f965df7a4fd0c87))
+* **client:** add support for binary request streaming ([56bd96e](https://github.com/writer/writer-python/commit/56bd96eb78fa32be252ec513660151c9fead3714))
+* **internal:** implement indices array format for query and form serialization ([898e230](https://github.com/writer/writer-python/commit/898e2309062a9fd1983a34319653b7d54a651fcd))
+
+
+### Bug Fixes
+
+* **client:** preserve hardcoded query params when merging with user params ([70761a4](https://github.com/writer/writer-python/commit/70761a47b5d5ac574ce7d8e2938562e1cb2fe2a9))
+* **deps:** bump minimum typing-extensions version ([e8e396e](https://github.com/writer/writer-python/commit/e8e396e83491bf46064cc5612fa29336e666d83d))
+* **docs:** fix mcp installation instructions for remote servers ([b448904](https://github.com/writer/writer-python/commit/b4489040d3c7fe661bdefa8e8faf4889cac984b2))
+* ensure file data are only sent as 1 parameter ([ae4c4ad](https://github.com/writer/writer-python/commit/ae4c4ad8931aed11cd3e06fccc735b6ef6318884))
+* **pydantic:** do not pass `by_alias` unless set ([f8a82fe](https://github.com/writer/writer-python/commit/f8a82feefe1355e507a7d0d5f6612ce543e9bad7))
+* remove custom binary_request handling superseded by codegen ([#28](https://github.com/writer/writer-python/issues/28)) ([763afaf](https://github.com/writer/writer-python/commit/763afaf1977954a9f5ccce9df9716e7f72a13389))
+* resolve duplicate code ([#29](https://github.com/writer/writer-python/issues/29)) ([57837bc](https://github.com/writer/writer-python/commit/57837bc12ba7bf9e83c333c39fe4f6524d229be6))
+* sanitize endpoint path params ([e87c2a0](https://github.com/writer/writer-python/commit/e87c2a052e70b421c2ccad30451853ce900d7cd0))
+
+
+### Chores
+
+* **ci:** skip lint on metadata-only changes ([e8479c1](https://github.com/writer/writer-python/commit/e8479c17eef94d004ce86f08956f608ff75102cd))
+* **ci:** skip uploading artifacts on stainless-internal branches ([f8763df](https://github.com/writer/writer-python/commit/f8763df9664747b5bdc35bb2dfde35f4bb8f7d21))
+* **ci:** upgrade `actions/github-script` ([aa28320](https://github.com/writer/writer-python/commit/aa283208404097945e299db4ffd23c36d973f414))
+* format all `api.md` files ([0d10457](https://github.com/writer/writer-python/commit/0d10457e3978e1d3f8b7cbedb634d2f68a4812b7))
+* **internal:** add request options to SSE classes ([fead372](https://github.com/writer/writer-python/commit/fead372f0986f9ab11cae22d2ce38e0f1dc094aa))
+* **internal:** bump dependencies ([6d39e6e](https://github.com/writer/writer-python/commit/6d39e6eef109d47d142224a93819407d353841e1))
+* **internal:** fix lint error on Python 3.14 ([0cf9c5f](https://github.com/writer/writer-python/commit/0cf9c5fdad07b9bba6a7d8c6540014280e7b97c6))
+* **internal:** make `test_proxy_environment_variables` more resilient ([00a019f](https://github.com/writer/writer-python/commit/00a019f36477624456cc047acfe9756c1a7977cd))
+* **internal:** make `test_proxy_environment_variables` more resilient to env ([2ec43af](https://github.com/writer/writer-python/commit/2ec43afcb5384be2428913f2d11db43892a336f6))
+* **internal:** tweak CI branches ([cff7016](https://github.com/writer/writer-python/commit/cff70161f1868dd4ea441b40fb3f9845459791c5))
+* **internal:** update `actions/checkout` version ([4de949f](https://github.com/writer/writer-python/commit/4de949f0dfba55067898bd3b25cdc6dceb193541))
+* **internal:** update gitignore ([c1351cf](https://github.com/writer/writer-python/commit/c1351cf85cf3959c5b5894731da9841fa8ffcf76))
+* **internal:** version bump ([6733bea](https://github.com/writer/writer-python/commit/6733bea446f6f96314b8df1a6f612f51bf12cbec))
+* **test:** do not count install time for mock server timeout ([ca18be8](https://github.com/writer/writer-python/commit/ca18be8d1efe67d932ae376ca2dead4b50da25b0))
+* **tests:** bump steady to v0.19.4 ([57552b1](https://github.com/writer/writer-python/commit/57552b1487456e52b7f6bc9a04021469426131a1))
+* **tests:** bump steady to v0.19.5 ([ec60cc4](https://github.com/writer/writer-python/commit/ec60cc49a2df464fda427fc53202eaa178a5c0ce))
+* **tests:** bump steady to v0.19.6 ([54e2229](https://github.com/writer/writer-python/commit/54e22299884a96539e861759286846c1cf6368e0))
+* **tests:** bump steady to v0.19.7 ([9b78194](https://github.com/writer/writer-python/commit/9b78194c51ddcdc933bc476e6c8d73ef9cdd8aeb))
+* **tests:** bump steady to v0.20.1 ([09fa209](https://github.com/writer/writer-python/commit/09fa209274456cab49225fde9ffef8f1e0e11406))
+* **tests:** bump steady to v0.20.2 ([4bb6605](https://github.com/writer/writer-python/commit/4bb6605e9837929848711e27b57c93e7c33b0e40))
+* update mock server docs ([dd5b8a9](https://github.com/writer/writer-python/commit/dd5b8a9924b13047e661e4c1ea59b59aa2b3bfef))
+* update placeholder string ([3d66005](https://github.com/writer/writer-python/commit/3d66005c744fec16d503b28342511b06caabee87))
+
+
+### Documentation
+
+* **api:** updates to API spec ([832318c](https://github.com/writer/writer-python/commit/832318cd702f5cd3e8150d0a09ec227685decee9))
+
+
+### Refactors
+
+* **tests:** switch from prism to steady ([b63e313](https://github.com/writer/writer-python/commit/b63e3135f77a4c73063118be89244c6b8674fbc5))
+
## 2.4.0 (2026-02-06)
Full Changelog: [v2.4.0-rc1...v2.4.0](https://github.com/writer/writer-python/compare/v2.4.0-rc1...v2.4.0)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 47a8ec3e..508bbc40 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,11 +85,10 @@ $ pip install ./path-to-wheel-file.whl
## Running tests
-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/dgellow/steady) against the OpenAPI spec to run the tests.
```sh
-# you will need npm installed
-$ npx prism mock path/to/your/openapi.yml
+$ ./scripts/mock
```
```sh
diff --git a/README.md b/README.md
index 56ae3c13..736aaee2 100644
--- a/README.md
+++ b/README.md
@@ -13,8 +13,8 @@ It is generated with [Stainless](https://www.stainless.com/).
Use the Writer MCP Server to enable AI assistants to interact with this API, allowing them to explore endpoints, make test requests, and use documentation to help integrate this SDK into your application.
-[](https://cursor.com/en-US/install-mcp?name=writer-sdk-mcp&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIndyaXRlci1zZGstbWNwIl19)
-[](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22writer-sdk-mcp%22%2C%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22writer-sdk-mcp%22%5D%7D)
+[](https://cursor.com/en-US/install-mcp?name=writer-sdk-mcp&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIndyaXRlci1zZGstbWNwIl0sImVudiI6eyJXUklURVJfQVBJX0tFWSI6Ik15IEFQSSBLZXkifX0)
+[](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22writer-sdk-mcp%22%2C%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22writer-sdk-mcp%22%5D%2C%22env%22%3A%7B%22WRITER_API_KEY%22%3A%22My%20API%20Key%22%7D%7D)
> Note: You may need to set environment variables in your MCP client.
@@ -28,7 +28,7 @@ To install the package from PyPI, use `pip`:
```sh
# install from PyPI
-pip install '--pre writer-sdk'
+pip install writer-sdk
```
## Prequisites
@@ -125,7 +125,7 @@ You can enable this by installing `aiohttp`:
```sh
# install from PyPI
-pip install '--pre writer-sdk[aiohttp]'
+pip install 'writer-sdk[aiohttp]'
```
Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
diff --git a/api.md b/api.md
index 4569c139..cd8c8e0e 100644
--- a/api.md
+++ b/api.md
@@ -162,32 +162,13 @@ Methods:
Types:
```python
-from writerai.types import (
- ToolAIDetectResponse,
- ToolContextAwareSplittingResponse,
- ToolParsePdfResponse,
- ToolWebSearchResponse,
-)
-```
-
-Methods:
-
-- client.tools.ai_detect(\*\*params) -> ToolAIDetectResponse
-- client.tools.context_aware_splitting(\*\*params) -> ToolContextAwareSplittingResponse
-- client.tools.parse_pdf(file_id, \*\*params) -> ToolParsePdfResponse
-- client.tools.web_search(\*\*params) -> ToolWebSearchResponse
-
-## Comprehend
-
-Types:
-
-```python
-from writerai.types.tools import ComprehendMedicalResponse
+from writerai.types import ToolParsePdfResponse, ToolWebSearchResponse
```
Methods:
-- client.tools.comprehend.medical(\*\*params) -> ComprehendMedicalResponse
+- client.tools.parse_pdf(file_id, \*\*params) -> ToolParsePdfResponse
+- client.tools.web_search(\*\*params) -> ToolWebSearchResponse
# Translation
diff --git a/pyproject.toml b/pyproject.toml
index 37522e73..e9cf5755 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "writer-sdk"
-version = "2.4.0"
+version = "2.5.0"
description = "The official Python library for the writer API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -11,7 +11,7 @@ authors = [
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
- "typing-extensions>=4.10, <5",
+ "typing-extensions>=4.14, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
@@ -73,7 +73,7 @@ format = { chain = [
# run formatting again to fix any inconsistencies when imports are stripped
"format:ruff",
]}
-"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md"
+"format:docs" = "bash -c 'python scripts/utils/ruffen-docs.py README.md $(find . -type f -name api.md)'"
"format:ruff" = "ruff format"
"lint" = { chain = [
diff --git a/requirements-dev.lock b/requirements-dev.lock
index b416f80c..db66397b 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -12,14 +12,14 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.13.2
+aiohttp==3.13.3
# via httpx-aiohttp
# via writer-sdk
aiosignal==1.4.0
# via aiohttp
annotated-types==0.7.0
# via pydantic
-anyio==4.12.0
+anyio==4.12.1
# via httpx
# via writer-sdk
argcomplete==3.6.3
@@ -33,7 +33,7 @@ attrs==25.4.0
# via nox
backports-asyncio-runner==1.2.0
# via pytest-asyncio
-certifi==2025.11.12
+certifi==2026.1.4
# via httpcore
# via httpx
colorlog==6.10.1
@@ -63,7 +63,7 @@ httpx==0.28.1
# via httpx-aiohttp
# via respx
# via writer-sdk
-httpx-aiohttp==0.1.9
+httpx-aiohttp==0.1.12
# via writer-sdk
humanize==4.13.0
# via nox
@@ -71,7 +71,7 @@ idna==3.11
# via anyio
# via httpx
# via yarl
-importlib-metadata==8.7.0
+importlib-metadata==8.7.1
iniconfig==2.1.0
# via pytest
inline-snapshot==0.20.5
@@ -87,14 +87,14 @@ multidict==6.7.0
mypy==1.17.0
mypy-extensions==1.1.0
# via mypy
-nodeenv==1.9.1
+nodeenv==1.10.0
# via pyright
nox==2025.11.12
packaging==25.0
# via dependency-groups
# via nox
# via pytest
-pathspec==0.12.1
+pathspec==1.0.3
# via mypy
platformdirs==4.4.0
# via virtualenv
@@ -120,13 +120,13 @@ python-dateutil==2.9.0.post0
# via time-machine
respx==0.22.0
rich==14.2.0
-ruff==0.14.7
+ruff==0.14.13
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via writer-sdk
time-machine==2.19.0
-tomli==2.3.0
+tomli==2.4.0
# via dependency-groups
# via mypy
# via nox
@@ -146,7 +146,7 @@ typing-extensions==4.15.0
# via writer-sdk
typing-inspection==0.4.2
# via pydantic
-virtualenv==20.35.4
+virtualenv==20.36.1
# via nox
yarl==1.22.0
# via aiohttp
diff --git a/requirements.lock b/requirements.lock
index 90343d3b..da1e220e 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -12,21 +12,21 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.13.2
+aiohttp==3.13.3
# via httpx-aiohttp
# via writer-sdk
aiosignal==1.4.0
# via aiohttp
annotated-types==0.7.0
# via pydantic
-anyio==4.12.0
+anyio==4.12.1
# via httpx
# via writer-sdk
async-timeout==5.0.1
# via aiohttp
attrs==25.4.0
# via aiohttp
-certifi==2025.11.12
+certifi==2026.1.4
# via httpcore
# via httpx
distro==1.9.0
@@ -43,7 +43,7 @@ httpcore==1.0.9
httpx==0.28.1
# via httpx-aiohttp
# via writer-sdk
-httpx-aiohttp==0.1.9
+httpx-aiohttp==0.1.12
# via writer-sdk
idna==3.11
# via anyio
diff --git a/scripts/mock b/scripts/mock
index 0b28f6ea..5cd7c157 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -19,23 +19,34 @@ fi
echo "==> Starting mock server with URL ${URL}"
-# Run prism mock on the given spec
+# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &
+ # Pre-install the package so the download doesn't eat into the startup timeout
+ npm exec --package=@stdy/cli@0.20.2 -- steady --version
- # Wait for server to come online
+ npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+
+ # Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
- while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+ attempts=0
+ while ! curl --silent --fail "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1; do
+ if ! kill -0 $! 2>/dev/null; then
+ echo
+ cat .stdy.log
+ exit 1
+ fi
+ attempts=$((attempts + 1))
+ if [ "$attempts" -ge 300 ]; then
+ echo
+ echo "Timed out waiting for Steady server to start"
+ cat .stdy.log
+ exit 1
+ fi
echo -n "."
sleep 0.1
done
- if grep -q "✖ fatal" ".prism.log"; then
- cat .prism.log
- exit 1
- fi
-
echo
else
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
+ npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index dbeda2d2..b8143aa3 100755
--- a/scripts/test
+++ b/scripts/test
@@ -9,8 +9,8 @@ GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
-function prism_is_running() {
- curl --silent "http://localhost:4010" >/dev/null 2>&1
+function steady_is_running() {
+ curl --silent "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1
}
kill_server_on_port() {
@@ -25,7 +25,7 @@ function is_overriding_api_base_url() {
[ -n "$TEST_API_BASE_URL" ]
}
-if ! is_overriding_api_base_url && ! prism_is_running ; then
+if ! is_overriding_api_base_url && ! steady_is_running ; then
# When we exit this script, make sure to kill the background mock server process
trap 'kill_server_on_port 4010' EXIT
@@ -36,19 +36,19 @@ fi
if is_overriding_api_base_url ; then
echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
echo
-elif ! prism_is_running ; then
- echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+elif ! steady_is_running ; then
+ echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Steady server"
echo -e "running against your OpenAPI spec."
echo
echo -e "To run the server, pass in the path or url of your OpenAPI"
- echo -e "spec to the prism command:"
+ echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
else
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo -e "${GREEN}✔ Mock steady server is running with your OpenAPI spec${NC}"
echo
fi
diff --git a/src/writerai/_base_client.py b/src/writerai/_base_client.py
index 04853fca..104bc87c 100644
--- a/src/writerai/_base_client.py
+++ b/src/writerai/_base_client.py
@@ -9,6 +9,7 @@
import inspect
import logging
import platform
+import warnings
import email.utils
from types import TracebackType
from random import random
@@ -52,9 +53,11 @@
ResponseT,
AnyMapping,
PostParser,
+ BinaryTypes,
RequestFiles,
HttpxSendArgs,
RequestOptions,
+ AsyncBinaryTypes,
HttpxRequestFiles,
ModelBuilderProtocol,
not_given,
@@ -84,6 +87,7 @@
APIConnectionError,
APIResponseValidationError,
)
+from ._utils._json import openapi_dumps
log: logging.Logger = logging.getLogger(__name__)
@@ -478,8 +482,19 @@ def _build_request(
retries_taken: int = 0,
) -> httpx.Request:
if log.isEnabledFor(logging.DEBUG):
- log.debug("Request options: %s", model_dump(options, exclude_unset=True))
-
+ log.debug(
+ "Request options: %s",
+ model_dump(
+ options,
+ exclude_unset=True,
+ # Pydantic v1 can't dump every type we support in content, so we exclude it for now.
+ exclude={
+ "content",
+ }
+ if PYDANTIC_V1
+ else {},
+ ),
+ )
kwargs: dict[str, Any] = {}
json_data = options.json_data
@@ -495,7 +510,6 @@ def _build_request(
params = _merge_mappings(self.default_query, options.params)
content_type = headers.get("Content-Type")
files = options.files
- content = options.content
# If the given Content-Type header is multipart/form-data then it
# has to be removed so that httpx can generate the header with
@@ -527,6 +541,10 @@ def _build_request(
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
+ # preserve hard-coded query params from the url
+ if params and prepared_url.query:
+ params = {**dict(prepared_url.params.items()), **params}
+ prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0])
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
@@ -534,10 +552,18 @@ def _build_request(
is_body_allowed = options.method.lower() != "get"
if is_body_allowed:
- if isinstance(json_data, bytes):
+ if options.content is not None and json_data is not None:
+ raise TypeError("Passing both `content` and `json_data` is not supported")
+ if options.content is not None and files is not None:
+ raise TypeError("Passing both `content` and `files` is not supported")
+ if options.content is not None:
+ kwargs["content"] = options.content
+ elif isinstance(json_data, bytes):
kwargs["content"] = json_data
- else:
- kwargs["json"] = json_data if is_given(json_data) else None
+ elif not files:
+ # Don't set content when JSON is sent as multipart/form-data,
+ # since httpx's content param overrides other body arguments
+ kwargs["content"] = openapi_dumps(json_data) if is_given(json_data) and json_data is not None else None
kwargs["files"] = files
else:
headers.pop("Content-Type", None)
@@ -554,7 +580,6 @@ def _build_request(
# so that passing a `TypedDict` doesn't cause an error.
# https://github.com/microsoft/pyright/issues/3526#event-6715453066
params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
- content=content,
**kwargs,
)
@@ -1197,6 +1222,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
@@ -1210,6 +1236,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
@@ -1224,6 +1251,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
@@ -1237,18 +1265,30 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
method="post",
url=path,
json_data=body,
files=to_httpx_files(files),
- content=get_file_content(_transform_file(binary_request)) if binary_request else None,
+ content=(get_file_content(_transform_file(binary_request)) if binary_request is not None else content),
**options,
)
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
@@ -1259,11 +1299,23 @@ def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="patch", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="patch", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
@@ -1273,11 +1325,23 @@ def put(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="put", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="put", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
@@ -1287,9 +1351,19 @@ def delete(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options)
return self.request(cast_to, opts)
def get_api_list(
@@ -1729,6 +1803,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
options: RequestOptions = {},
@@ -1742,6 +1817,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
options: RequestOptions = {},
@@ -1756,6 +1832,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
options: RequestOptions = {},
@@ -1769,18 +1846,32 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
binary_request: FileTypes | None = None,
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
method="post",
url=path,
json_data=body,
files=await async_to_httpx_files(files),
- content=get_file_content(await _async_transform_file(binary_request)) if binary_request else None,
+ content=(
+ get_file_content(await _async_transform_file(binary_request)) if binary_request is not None else content
+ ),
**options,
)
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
@@ -1791,11 +1882,28 @@ async def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="patch", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="patch",
+ url=path,
+ json_data=body,
+ content=content,
+ files=await async_to_httpx_files(files),
+ **options,
)
return await self.request(cast_to, opts)
@@ -1805,11 +1913,23 @@ async def put(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="put", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts)
@@ -1819,9 +1939,19 @@ async def delete(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options)
return await self.request(cast_to, opts)
def get_api_list(
diff --git a/src/writerai/_client.py b/src/writerai/_client.py
index 0f4a5ca0..014f6f03 100644
--- a/src/writerai/_client.py
+++ b/src/writerai/_client.py
@@ -35,11 +35,11 @@
from .resources import chat, files, tools, graphs, models, vision, completions, translation, applications
from .resources.chat import ChatResource, AsyncChatResource
from .resources.files import FilesResource, AsyncFilesResource
+ from .resources.tools import ToolsResource, AsyncToolsResource
from .resources.graphs import GraphsResource, AsyncGraphsResource
from .resources.models import ModelsResource, AsyncModelsResource
from .resources.vision import VisionResource, AsyncVisionResource
from .resources.completions import CompletionsResource, AsyncCompletionsResource
- from .resources.tools.tools import ToolsResource, AsyncToolsResource
from .resources.translation import TranslationResource, AsyncTranslationResource
from .resources.applications.applications import ApplicationsResource, AsyncApplicationsResource
diff --git a/src/writerai/_compat.py b/src/writerai/_compat.py
index 73a1f3ea..340c91a6 100644
--- a/src/writerai/_compat.py
+++ b/src/writerai/_compat.py
@@ -2,7 +2,7 @@
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
from datetime import date, datetime
-from typing_extensions import Self, Literal
+from typing_extensions import Self, Literal, TypedDict
import pydantic
from pydantic.fields import FieldInfo
@@ -131,6 +131,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
return model.model_dump_json(indent=indent)
+class _ModelDumpKwargs(TypedDict, total=False):
+ by_alias: bool
+
+
def model_dump(
model: pydantic.BaseModel,
*,
@@ -139,8 +143,12 @@ def model_dump(
exclude_defaults: bool = False,
warnings: bool = True,
mode: Literal["json", "python"] = "python",
+ by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
+ kwargs: _ModelDumpKwargs = {}
+ if by_alias is not None:
+ kwargs["by_alias"] = by_alias
return model.model_dump(
mode=mode,
exclude=exclude,
@@ -148,13 +156,12 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
+ **kwargs,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- exclude=exclude,
- exclude_unset=exclude_unset,
- exclude_defaults=exclude_defaults,
+ exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, by_alias=bool(by_alias)
),
)
diff --git a/src/writerai/_models.py b/src/writerai/_models.py
index 94bf248d..29070e05 100644
--- a/src/writerai/_models.py
+++ b/src/writerai/_models.py
@@ -3,7 +3,20 @@
import os
import inspect
import weakref
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Type,
+ Union,
+ Generic,
+ TypeVar,
+ Callable,
+ Iterable,
+ Optional,
+ AsyncIterable,
+ cast,
+)
from datetime import date, datetime
from typing_extensions import (
List,
@@ -32,7 +45,6 @@
Timeout,
NotGiven,
AnyMapping,
- HttpxFileContent,
HttpxRequestFiles,
)
from ._utils import (
@@ -787,8 +799,8 @@ class FinalRequestOptionsInput(TypedDict, total=False):
max_retries: int
timeout: float | Timeout | None
files: HttpxRequestFiles | None
- content: HttpxFileContent | None
idempotency_key: str
+ content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None]
json_data: Body
extra_json: AnyMapping
follow_redirects: bool
@@ -803,11 +815,11 @@ class FinalRequestOptions(pydantic.BaseModel):
max_retries: Union[int, NotGiven] = NotGiven()
timeout: Union[float, Timeout, None, NotGiven] = NotGiven()
files: Union[HttpxRequestFiles, None] = None
- content: Union[HttpxFileContent, None] = None
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
follow_redirects: Union[bool, None] = None
+ content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] = None
# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
json_data: Union[Body, None] = None
diff --git a/src/writerai/_qs.py b/src/writerai/_qs.py
index ada6fd3f..de8c99bc 100644
--- a/src/writerai/_qs.py
+++ b/src/writerai/_qs.py
@@ -101,7 +101,10 @@ def _stringify_item(
items.extend(self._stringify_item(key, item, opts))
return items
elif array_format == "indices":
- raise NotImplementedError("The array indices format is not supported yet")
+ items = []
+ for i, item in enumerate(value):
+ items.extend(self._stringify_item(f"{key}[{i}]", item, opts))
+ return items
elif array_format == "brackets":
items = []
key = key + "[]"
diff --git a/src/writerai/_response.py b/src/writerai/_response.py
index 2183819a..814b0419 100644
--- a/src/writerai/_response.py
+++ b/src/writerai/_response.py
@@ -152,6 +152,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -162,6 +163,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -175,6 +177,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=cast_to,
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
diff --git a/src/writerai/_streaming.py b/src/writerai/_streaming.py
index a9e69241..389e0f31 100644
--- a/src/writerai/_streaming.py
+++ b/src/writerai/_streaming.py
@@ -4,7 +4,7 @@
import json
import inspect
from types import TracebackType
-from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, Optional, AsyncIterator, cast
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
import httpx
@@ -13,6 +13,7 @@
if TYPE_CHECKING:
from ._client import Writer, AsyncWriter
+ from ._models import FinalRequestOptions
_T = TypeVar("_T")
@@ -22,7 +23,7 @@ class Stream(Generic[_T]):
"""Provides the core interface to iterate over a synchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEBytesDecoder
def __init__(
@@ -31,10 +32,12 @@ def __init__(
cast_to: type[_T],
response: httpx.Response,
client: Writer,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
@@ -104,7 +107,7 @@ class AsyncStream(Generic[_T]):
"""Provides the core interface to iterate over an asynchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEDecoder | SSEBytesDecoder
def __init__(
@@ -113,10 +116,12 @@ def __init__(
cast_to: type[_T],
response: httpx.Response,
client: AsyncWriter,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
diff --git a/src/writerai/_types.py b/src/writerai/_types.py
index ed3a7f53..44e94d72 100644
--- a/src/writerai/_types.py
+++ b/src/writerai/_types.py
@@ -13,9 +13,11 @@
Mapping,
TypeVar,
Callable,
+ Iterable,
Iterator,
Optional,
Sequence,
+ AsyncIterable,
)
from typing_extensions import (
Set,
@@ -56,6 +58,13 @@
else:
Base64FileInput = Union[IO[bytes], PathLike]
FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8.
+
+
+# Used for sending raw binary data / streaming data in request bodies
+# e.g. for file uploads without multipart encoding
+BinaryTypes = Union[bytes, bytearray, IO[bytes], Iterable[bytes]]
+AsyncBinaryTypes = Union[bytes, bytearray, IO[bytes], AsyncIterable[bytes]]
+
FileTypes = Union[
# file (or bytes)
FileContent,
diff --git a/src/writerai/_utils/__init__.py b/src/writerai/_utils/__init__.py
index dc64e29a..10cb66d2 100644
--- a/src/writerai/_utils/__init__.py
+++ b/src/writerai/_utils/__init__.py
@@ -1,3 +1,4 @@
+from ._path import path_template as path_template
from ._sync import asyncify as asyncify
from ._proxy import LazyProxy as LazyProxy
from ._utils import (
diff --git a/src/writerai/_utils/_compat.py b/src/writerai/_utils/_compat.py
index dd703233..2c70b299 100644
--- a/src/writerai/_utils/_compat.py
+++ b/src/writerai/_utils/_compat.py
@@ -26,7 +26,7 @@ def is_union(tp: Optional[Type[Any]]) -> bool:
else:
import types
- return tp is Union or tp is types.UnionType
+ return tp is Union or tp is types.UnionType # type: ignore[comparison-overlap]
def is_typeddict(tp: Type[Any]) -> bool:
diff --git a/src/writerai/_utils/_json.py b/src/writerai/_utils/_json.py
new file mode 100644
index 00000000..60584214
--- /dev/null
+++ b/src/writerai/_utils/_json.py
@@ -0,0 +1,35 @@
+import json
+from typing import Any
+from datetime import datetime
+from typing_extensions import override
+
+import pydantic
+
+from .._compat import model_dump
+
+
+def openapi_dumps(obj: Any) -> bytes:
+ """
+ Serialize an object to UTF-8 encoded JSON bytes.
+
+ Extends the standard json.dumps with support for additional types
+ commonly used in the SDK, such as `datetime`, `pydantic.BaseModel`, etc.
+ """
+ return json.dumps(
+ obj,
+ cls=_CustomEncoder,
+ # Uses the same defaults as httpx's JSON serialization
+ ensure_ascii=False,
+ separators=(",", ":"),
+ allow_nan=False,
+ ).encode()
+
+
+class _CustomEncoder(json.JSONEncoder):
+ @override
+ def default(self, o: Any) -> Any:
+ if isinstance(o, datetime):
+ return o.isoformat()
+ if isinstance(o, pydantic.BaseModel):
+ return model_dump(o, exclude_unset=True, mode="json", by_alias=True)
+ return super().default(o)
diff --git a/src/writerai/_utils/_path.py b/src/writerai/_utils/_path.py
new file mode 100644
index 00000000..4d6e1e4c
--- /dev/null
+++ b/src/writerai/_utils/_path.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import re
+from typing import (
+ Any,
+ Mapping,
+ Callable,
+)
+from urllib.parse import quote
+
+# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E).
+_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$")
+
+_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}")
+
+
+def _quote_path_segment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI path segment.
+
+ Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
+ """
+ # quote() already treats unreserved characters (letters, digits, and -._~)
+ # as safe, so we only need to add sub-delims, ':', and '@'.
+ # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted.
+ return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI query string.
+
+ Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+ """
+ return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI fragment.
+
+ Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+ """
+ return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+ template: str,
+ values: Mapping[str, Any],
+ quoter: Callable[[str], str],
+) -> str:
+ """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+ Placeholder names are looked up in `values`.
+
+ Raises:
+ KeyError: If a placeholder is not found in `values`.
+ """
+ # re.split with a capturing group returns alternating
+ # [text, name, text, name, ..., text] elements.
+ parts = _PLACEHOLDER_RE.split(template)
+
+ for i in range(1, len(parts), 2):
+ name = parts[i]
+ if name not in values:
+ raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+ val = values[name]
+ if val is None:
+ parts[i] = "null"
+ elif isinstance(val, bool):
+ parts[i] = "true" if val else "false"
+ else:
+ parts[i] = quoter(str(values[name]))
+
+ return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+ """Interpolate {name} placeholders in `template` from keyword arguments.
+
+ Args:
+ template: The template string containing {name} placeholders.
+ **kwargs: Keyword arguments to interpolate into the template.
+
+ Returns:
+ The template with placeholders interpolated and percent-encoded.
+
+ Safe characters for percent-encoding are dependent on the URI component.
+ Placeholders in path and fragment portions are percent-encoded where the `segment`
+ and `fragment` sets from RFC 3986 respectively are considered safe.
+ Placeholders in the query portion are percent-encoded where the `query` set from
+ RFC 3986 §3.4 is considered safe except for = and & characters.
+
+ Raises:
+ KeyError: If a placeholder is not found in `kwargs`.
+ ValueError: If resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+ """
+ # Split the template into path, query, and fragment portions.
+ fragment_template: str | None = None
+ query_template: str | None = None
+
+ rest = template
+ if "#" in rest:
+ rest, fragment_template = rest.split("#", 1)
+ if "?" in rest:
+ rest, query_template = rest.split("?", 1)
+ path_template = rest
+
+ # Interpolate each portion with the appropriate quoting rules.
+ path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+ # Reject dot-segments (. and ..) in the final assembled path. The check
+ # runs after interpolation so that adjacent placeholders or a mix of static
+ # text and placeholders that together form a dot-segment are caught.
+ # Also reject percent-encoded dot-segments to protect against incorrectly
+ # implemented normalization in servers/proxies.
+ for segment in path_result.split("/"):
+ if _DOT_SEGMENT_RE.match(segment):
+ raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+ result = path_result
+ if query_template is not None:
+ result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+ if fragment_template is not None:
+ result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+ return result
diff --git a/src/writerai/_utils/_utils.py b/src/writerai/_utils/_utils.py
index eec7f4a1..63b8cd60 100644
--- a/src/writerai/_utils/_utils.py
+++ b/src/writerai/_utils/_utils.py
@@ -86,8 +86,9 @@ def _extract_items(
index += 1
if is_dict(obj):
try:
- # We are at the last entry in the path so we must remove the field
- if (len(path)) == index:
+ # Remove the field if there are no more dict keys in the path,
+ # only "" traversal markers or end.
+ if all(p == "" for p in path[index:]):
item = obj.pop(key)
else:
item = obj[key]
diff --git a/src/writerai/_version.py b/src/writerai/_version.py
index 227300e7..9c0ee430 100644
--- a/src/writerai/_version.py
+++ b/src/writerai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "writerai"
-__version__ = "2.4.0" # x-release-please-version
+__version__ = "2.5.0" # x-release-please-version
diff --git a/src/writerai/resources/applications/applications.py b/src/writerai/resources/applications/applications.py
index d58d476f..8c13bb40 100644
--- a/src/writerai/resources/applications/applications.py
+++ b/src/writerai/resources/applications/applications.py
@@ -25,7 +25,7 @@
)
from ...types import application_list_params, application_generate_content_params
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._utils import path_template, required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -100,7 +100,7 @@ def retrieve(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._get(
- f"/v1/applications/{application_id}",
+ path_template("/v1/applications/{application_id}", application_id=application_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -280,7 +280,7 @@ def generate_content(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._post(
- f"/v1/applications/{application_id}",
+ path_template("/v1/applications/{application_id}", application_id=application_id),
body=maybe_transform(
{
"inputs": inputs,
@@ -354,7 +354,7 @@ async def retrieve(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return await self._get(
- f"/v1/applications/{application_id}",
+ path_template("/v1/applications/{application_id}", application_id=application_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -534,7 +534,7 @@ async def generate_content(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return await self._post(
- f"/v1/applications/{application_id}",
+ path_template("/v1/applications/{application_id}", application_id=application_id),
body=await async_maybe_transform(
{
"inputs": inputs,
diff --git a/src/writerai/resources/applications/graphs.py b/src/writerai/resources/applications/graphs.py
index 2ef559a4..212844a9 100644
--- a/src/writerai/resources/applications/graphs.py
+++ b/src/writerai/resources/applications/graphs.py
@@ -5,7 +5,7 @@
import httpx
from ..._types import Body, Query, Headers, NotGiven, SequenceNotStr, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -72,7 +72,7 @@ def update(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._put(
- f"/v1/applications/{application_id}/graphs",
+ path_template("/v1/applications/{application_id}/graphs", application_id=application_id),
body=maybe_transform({"graph_ids": graph_ids}, graph_update_params.GraphUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -107,7 +107,7 @@ def list(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._get(
- f"/v1/applications/{application_id}/graphs",
+ path_template("/v1/applications/{application_id}/graphs", application_id=application_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -166,7 +166,7 @@ async def update(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return await self._put(
- f"/v1/applications/{application_id}/graphs",
+ path_template("/v1/applications/{application_id}/graphs", application_id=application_id),
body=await async_maybe_transform({"graph_ids": graph_ids}, graph_update_params.GraphUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -201,7 +201,7 @@ async def list(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return await self._get(
- f"/v1/applications/{application_id}/graphs",
+ path_template("/v1/applications/{application_id}/graphs", application_id=application_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/writerai/resources/applications/jobs.py b/src/writerai/resources/applications/jobs.py
index f27f6a3e..69891e71 100644
--- a/src/writerai/resources/applications/jobs.py
+++ b/src/writerai/resources/applications/jobs.py
@@ -8,7 +8,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -77,7 +77,7 @@ def create(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._post(
- f"/v1/applications/{application_id}/jobs",
+ path_template("/v1/applications/{application_id}/jobs", application_id=application_id),
body=maybe_transform({"inputs": inputs}, job_create_params.JobCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -111,7 +111,7 @@ def retrieve(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return self._get(
- f"/v1/applications/jobs/{job_id}",
+ path_template("/v1/applications/jobs/{job_id}", job_id=job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -154,7 +154,7 @@ def list(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._get_api_list(
- f"/v1/applications/{application_id}/jobs",
+ path_template("/v1/applications/{application_id}/jobs", application_id=application_id),
page=SyncApplicationJobsOffset[ApplicationGenerateAsyncResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -200,7 +200,7 @@ def retry(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return self._post(
- f"/v1/applications/jobs/{job_id}/retry",
+ path_template("/v1/applications/jobs/{job_id}/retry", job_id=job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -258,7 +258,7 @@ async def create(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return await self._post(
- f"/v1/applications/{application_id}/jobs",
+ path_template("/v1/applications/{application_id}/jobs", application_id=application_id),
body=await async_maybe_transform({"inputs": inputs}, job_create_params.JobCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -292,7 +292,7 @@ async def retrieve(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return await self._get(
- f"/v1/applications/jobs/{job_id}",
+ path_template("/v1/applications/jobs/{job_id}", job_id=job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -335,7 +335,7 @@ def list(
if not application_id:
raise ValueError(f"Expected a non-empty value for `application_id` but received {application_id!r}")
return self._get_api_list(
- f"/v1/applications/{application_id}/jobs",
+ path_template("/v1/applications/{application_id}/jobs", application_id=application_id),
page=AsyncApplicationJobsOffset[ApplicationGenerateAsyncResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -381,7 +381,7 @@ async def retry(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return await self._post(
- f"/v1/applications/jobs/{job_id}/retry",
+ path_template("/v1/applications/jobs/{job_id}/retry", job_id=job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/writerai/resources/files.py b/src/writerai/resources/files.py
index 94d8495d..0a8e5a48 100644
--- a/src/writerai/resources/files.py
+++ b/src/writerai/resources/files.py
@@ -8,7 +8,7 @@
from ..types import file_list_params, file_retry_params, file_upload_params
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -81,7 +81,7 @@ def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -189,7 +189,7 @@ def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -225,7 +225,7 @@ def download(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
return self._get(
- f"/v1/files/{file_id}/download",
+ path_template("/v1/files/{file_id}/download", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -369,7 +369,7 @@ async def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -477,7 +477,7 @@ async def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -513,7 +513,7 @@ async def download(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
return await self._get(
- f"/v1/files/{file_id}/download",
+ path_template("/v1/files/{file_id}/download", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/writerai/resources/graphs.py b/src/writerai/resources/graphs.py
index 8669d8a2..befd9de7 100644
--- a/src/writerai/resources/graphs.py
+++ b/src/writerai/resources/graphs.py
@@ -15,7 +15,7 @@
graph_add_file_to_graph_params,
)
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
-from .._utils import required_args, maybe_transform, async_maybe_transform
+from .._utils import path_template, required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -130,7 +130,7 @@ def retrieve(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return self._get(
- f"/v1/graphs/{graph_id}",
+ path_template("/v1/graphs/{graph_id}", graph_id=graph_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -176,7 +176,7 @@ def update(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return self._put(
- f"/v1/graphs/{graph_id}",
+ path_template("/v1/graphs/{graph_id}", graph_id=graph_id),
body=maybe_transform(
{
"description": description,
@@ -276,7 +276,7 @@ def delete(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return self._delete(
- f"/v1/graphs/{graph_id}",
+ path_template("/v1/graphs/{graph_id}", graph_id=graph_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -312,7 +312,7 @@ def add_file_to_graph(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return self._post(
- f"/v1/graphs/{graph_id}/file",
+ path_template("/v1/graphs/{graph_id}/file", graph_id=graph_id),
body=maybe_transform({"file_id": file_id}, graph_add_file_to_graph_params.GraphAddFileToGraphParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -550,7 +550,7 @@ def remove_file_from_graph(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
- f"/v1/graphs/{graph_id}/file/{file_id}",
+ path_template("/v1/graphs/{graph_id}/file/{file_id}", graph_id=graph_id, file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -649,7 +649,7 @@ async def retrieve(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return await self._get(
- f"/v1/graphs/{graph_id}",
+ path_template("/v1/graphs/{graph_id}", graph_id=graph_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -695,7 +695,7 @@ async def update(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return await self._put(
- f"/v1/graphs/{graph_id}",
+ path_template("/v1/graphs/{graph_id}", graph_id=graph_id),
body=await async_maybe_transform(
{
"description": description,
@@ -795,7 +795,7 @@ async def delete(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return await self._delete(
- f"/v1/graphs/{graph_id}",
+ path_template("/v1/graphs/{graph_id}", graph_id=graph_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -831,7 +831,7 @@ async def add_file_to_graph(
if not graph_id:
raise ValueError(f"Expected a non-empty value for `graph_id` but received {graph_id!r}")
return await self._post(
- f"/v1/graphs/{graph_id}/file",
+ path_template("/v1/graphs/{graph_id}/file", graph_id=graph_id),
body=await async_maybe_transform(
{"file_id": file_id}, graph_add_file_to_graph_params.GraphAddFileToGraphParams
),
@@ -1071,7 +1071,7 @@ async def remove_file_from_graph(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
- f"/v1/graphs/{graph_id}/file/{file_id}",
+ path_template("/v1/graphs/{graph_id}/file/{file_id}", graph_id=graph_id, file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/writerai/resources/tools/tools.py b/src/writerai/resources/tools.py
similarity index 68%
rename from src/writerai/resources/tools/tools.py
rename to src/writerai/resources/tools.py
index d48109e0..8fd00630 100644
--- a/src/writerai/resources/tools/tools.py
+++ b/src/writerai/resources/tools.py
@@ -8,44 +8,25 @@
import httpx
-from ...types import (
- tool_ai_detect_params,
- tool_parse_pdf_params,
- tool_web_search_params,
- tool_context_aware_splitting_params,
-)
-from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from .comprehend import (
- ComprehendResource,
- AsyncComprehendResource,
- ComprehendResourceWithRawResponse,
- AsyncComprehendResourceWithRawResponse,
- ComprehendResourceWithStreamingResponse,
- AsyncComprehendResourceWithStreamingResponse,
-)
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
+from ..types import tool_parse_pdf_params, tool_web_search_params
+from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from .._utils import path_template, maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ..._base_client import make_request_options
-from ...types.tool_ai_detect_response import ToolAIDetectResponse
-from ...types.tool_parse_pdf_response import ToolParsePdfResponse
-from ...types.tool_web_search_response import ToolWebSearchResponse
-from ...types.tool_context_aware_splitting_response import ToolContextAwareSplittingResponse
+from .._base_client import make_request_options
+from ..types.tool_parse_pdf_response import ToolParsePdfResponse
+from ..types.tool_web_search_response import ToolWebSearchResponse
__all__ = ["ToolsResource", "AsyncToolsResource"]
class ToolsResource(SyncAPIResource):
- @cached_property
- def comprehend(self) -> ComprehendResource:
- return ComprehendResource(self._client)
-
@cached_property
def with_raw_response(self) -> ToolsResourceWithRawResponse:
"""
@@ -65,95 +46,6 @@ def with_streaming_response(self) -> ToolsResourceWithStreamingResponse:
"""
return ToolsResourceWithStreamingResponse(self)
- @typing_extensions.deprecated(
- "Will be removed in a future release. Please migrate to alternative solutions. See documentation at dev.writer.com for more information."
- )
- def ai_detect(
- self,
- *,
- input: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolAIDetectResponse:
- """Detects if content is AI- or human-generated, with a confidence score.
-
- Content
- must have at least 350 characters
-
- Args:
- input: The content to determine if it is AI- or human-generated. Content must have at
- least 350 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/tools/ai-detect",
- body=maybe_transform({"input": input}, tool_ai_detect_params.ToolAIDetectParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolAIDetectResponse,
- )
-
- @typing_extensions.deprecated(
- "Will be removed in a future release. Please migrate to alternative solutions. See documentation at dev.writer.com for more information."
- )
- def context_aware_splitting(
- self,
- *,
- strategy: Literal["llm_split", "fast_split", "hybrid_split"],
- text: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolContextAwareSplittingResponse:
- """
- Splits a long block of text (maximum 4000 words) into smaller chunks while
- preserving the semantic meaning of the text and context between the chunks.
-
- Args:
- strategy: The strategy to use for splitting the text into chunks. `llm_split` uses the
- language model to split the text, `fast_split` uses a fast heuristic-based
- approach, and `hybrid_split` combines both strategies.
-
- text: The text to split into chunks.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/tools/context-aware-splitting",
- body=maybe_transform(
- {
- "strategy": strategy,
- "text": text,
- },
- tool_context_aware_splitting_params.ToolContextAwareSplittingParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolContextAwareSplittingResponse,
- )
-
@typing_extensions.deprecated(
"Will be removed in a future release. A replacement PDF parsing tool for chat completions is planned; see documentation at dev.writer.com for more information."
)
@@ -186,7 +78,7 @@ def parse_pdf(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._post(
- f"/v1/tools/pdf-parser/{file_id}",
+ path_template("/v1/tools/pdf-parser/{file_id}", file_id=file_id),
body=maybe_transform({"format": format}, tool_parse_pdf_params.ToolParsePdfParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -472,10 +364,6 @@ def web_search(
class AsyncToolsResource(AsyncAPIResource):
- @cached_property
- def comprehend(self) -> AsyncComprehendResource:
- return AsyncComprehendResource(self._client)
-
@cached_property
def with_raw_response(self) -> AsyncToolsResourceWithRawResponse:
"""
@@ -495,95 +383,6 @@ def with_streaming_response(self) -> AsyncToolsResourceWithStreamingResponse:
"""
return AsyncToolsResourceWithStreamingResponse(self)
- @typing_extensions.deprecated(
- "Will be removed in a future release. Please migrate to alternative solutions. See documentation at dev.writer.com for more information."
- )
- async def ai_detect(
- self,
- *,
- input: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolAIDetectResponse:
- """Detects if content is AI- or human-generated, with a confidence score.
-
- Content
- must have at least 350 characters
-
- Args:
- input: The content to determine if it is AI- or human-generated. Content must have at
- least 350 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/tools/ai-detect",
- body=await async_maybe_transform({"input": input}, tool_ai_detect_params.ToolAIDetectParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolAIDetectResponse,
- )
-
- @typing_extensions.deprecated(
- "Will be removed in a future release. Please migrate to alternative solutions. See documentation at dev.writer.com for more information."
- )
- async def context_aware_splitting(
- self,
- *,
- strategy: Literal["llm_split", "fast_split", "hybrid_split"],
- text: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolContextAwareSplittingResponse:
- """
- Splits a long block of text (maximum 4000 words) into smaller chunks while
- preserving the semantic meaning of the text and context between the chunks.
-
- Args:
- strategy: The strategy to use for splitting the text into chunks. `llm_split` uses the
- language model to split the text, `fast_split` uses a fast heuristic-based
- approach, and `hybrid_split` combines both strategies.
-
- text: The text to split into chunks.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/tools/context-aware-splitting",
- body=await async_maybe_transform(
- {
- "strategy": strategy,
- "text": text,
- },
- tool_context_aware_splitting_params.ToolContextAwareSplittingParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolContextAwareSplittingResponse,
- )
-
@typing_extensions.deprecated(
"Will be removed in a future release. A replacement PDF parsing tool for chat completions is planned; see documentation at dev.writer.com for more information."
)
@@ -616,7 +415,7 @@ async def parse_pdf(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._post(
- f"/v1/tools/pdf-parser/{file_id}",
+ path_template("/v1/tools/pdf-parser/{file_id}", file_id=file_id),
body=await async_maybe_transform({"format": format}, tool_parse_pdf_params.ToolParsePdfParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -905,16 +704,6 @@ class ToolsResourceWithRawResponse:
def __init__(self, tools: ToolsResource) -> None:
self._tools = tools
- self.ai_detect = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- tools.ai_detect, # pyright: ignore[reportDeprecated],
- )
- )
- self.context_aware_splitting = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- tools.context_aware_splitting, # pyright: ignore[reportDeprecated],
- )
- )
self.parse_pdf = ( # pyright: ignore[reportDeprecated]
to_raw_response_wrapper(
tools.parse_pdf, # pyright: ignore[reportDeprecated],
@@ -926,25 +715,11 @@ def __init__(self, tools: ToolsResource) -> None:
)
)
- @cached_property
- def comprehend(self) -> ComprehendResourceWithRawResponse:
- return ComprehendResourceWithRawResponse(self._tools.comprehend)
-
class AsyncToolsResourceWithRawResponse:
def __init__(self, tools: AsyncToolsResource) -> None:
self._tools = tools
- self.ai_detect = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- tools.ai_detect, # pyright: ignore[reportDeprecated],
- )
- )
- self.context_aware_splitting = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- tools.context_aware_splitting, # pyright: ignore[reportDeprecated],
- )
- )
self.parse_pdf = ( # pyright: ignore[reportDeprecated]
async_to_raw_response_wrapper(
tools.parse_pdf, # pyright: ignore[reportDeprecated],
@@ -956,25 +731,11 @@ def __init__(self, tools: AsyncToolsResource) -> None:
)
)
- @cached_property
- def comprehend(self) -> AsyncComprehendResourceWithRawResponse:
- return AsyncComprehendResourceWithRawResponse(self._tools.comprehend)
-
class ToolsResourceWithStreamingResponse:
def __init__(self, tools: ToolsResource) -> None:
self._tools = tools
- self.ai_detect = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- tools.ai_detect, # pyright: ignore[reportDeprecated],
- )
- )
- self.context_aware_splitting = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- tools.context_aware_splitting, # pyright: ignore[reportDeprecated],
- )
- )
self.parse_pdf = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
tools.parse_pdf, # pyright: ignore[reportDeprecated],
@@ -986,25 +747,11 @@ def __init__(self, tools: ToolsResource) -> None:
)
)
- @cached_property
- def comprehend(self) -> ComprehendResourceWithStreamingResponse:
- return ComprehendResourceWithStreamingResponse(self._tools.comprehend)
-
class AsyncToolsResourceWithStreamingResponse:
def __init__(self, tools: AsyncToolsResource) -> None:
self._tools = tools
- self.ai_detect = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- tools.ai_detect, # pyright: ignore[reportDeprecated],
- )
- )
- self.context_aware_splitting = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- tools.context_aware_splitting, # pyright: ignore[reportDeprecated],
- )
- )
self.parse_pdf = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
tools.parse_pdf, # pyright: ignore[reportDeprecated],
@@ -1015,7 +762,3 @@ def __init__(self, tools: AsyncToolsResource) -> None:
tools.web_search, # pyright: ignore[reportDeprecated],
)
)
-
- @cached_property
- def comprehend(self) -> AsyncComprehendResourceWithStreamingResponse:
- return AsyncComprehendResourceWithStreamingResponse(self._tools.comprehend)
diff --git a/src/writerai/resources/tools/__init__.py b/src/writerai/resources/tools/__init__.py
deleted file mode 100644
index 8f4ceef3..00000000
--- a/src/writerai/resources/tools/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .tools import (
- ToolsResource,
- AsyncToolsResource,
- ToolsResourceWithRawResponse,
- AsyncToolsResourceWithRawResponse,
- ToolsResourceWithStreamingResponse,
- AsyncToolsResourceWithStreamingResponse,
-)
-from .comprehend import (
- ComprehendResource,
- AsyncComprehendResource,
- ComprehendResourceWithRawResponse,
- AsyncComprehendResourceWithRawResponse,
- ComprehendResourceWithStreamingResponse,
- AsyncComprehendResourceWithStreamingResponse,
-)
-
-__all__ = [
- "ComprehendResource",
- "AsyncComprehendResource",
- "ComprehendResourceWithRawResponse",
- "AsyncComprehendResourceWithRawResponse",
- "ComprehendResourceWithStreamingResponse",
- "AsyncComprehendResourceWithStreamingResponse",
- "ToolsResource",
- "AsyncToolsResource",
- "ToolsResourceWithRawResponse",
- "AsyncToolsResourceWithRawResponse",
- "ToolsResourceWithStreamingResponse",
- "AsyncToolsResourceWithStreamingResponse",
-]
diff --git a/src/writerai/resources/tools/comprehend.py b/src/writerai/resources/tools/comprehend.py
deleted file mode 100644
index 2cff5f4f..00000000
--- a/src/writerai/resources/tools/comprehend.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import Body, Query, Headers, NotGiven, not_given
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...types.tools import comprehend_medical_params
-from ..._base_client import make_request_options
-from ...types.tools.comprehend_medical_response import ComprehendMedicalResponse
-
-__all__ = ["ComprehendResource", "AsyncComprehendResource"]
-
-
-class ComprehendResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ComprehendResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/writer/writer-python#accessing-raw-response-data-eg-headers
- """
- return ComprehendResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ComprehendResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/writer/writer-python#with_streaming_response
- """
- return ComprehendResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated(
- "Will be removed in a future release. Migrate to `chat.chat` with the LLM tool using the `palmyra-med` model for medical analysis. See documentation at dev.writer.com for more information."
- )
- def medical(
- self,
- *,
- content: str,
- response_type: Literal["Entities", "RxNorm", "ICD-10-CM", "SNOMED CT"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ComprehendMedicalResponse:
- """
- Analyze unstructured medical text to extract entities labeled with standardized
- medical codes and confidence scores.
-
- Args:
- content: The text to analyze.
-
- response_type: The structure of the response to return. `Entities` returns medical entities,
- `RxNorm` returns medication information, `ICD-10-CM` returns diagnosis codes,
- and `SNOMED CT` returns medical concepts.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/tools/comprehend/medical",
- body=maybe_transform(
- {
- "content": content,
- "response_type": response_type,
- },
- comprehend_medical_params.ComprehendMedicalParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ComprehendMedicalResponse,
- )
-
-
-class AsyncComprehendResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncComprehendResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/writer/writer-python#accessing-raw-response-data-eg-headers
- """
- return AsyncComprehendResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncComprehendResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/writer/writer-python#with_streaming_response
- """
- return AsyncComprehendResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated(
- "Will be removed in a future release. Migrate to `chat.chat` with the LLM tool using the `palmyra-med` model for medical analysis. See documentation at dev.writer.com for more information."
- )
- async def medical(
- self,
- *,
- content: str,
- response_type: Literal["Entities", "RxNorm", "ICD-10-CM", "SNOMED CT"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ComprehendMedicalResponse:
- """
- Analyze unstructured medical text to extract entities labeled with standardized
- medical codes and confidence scores.
-
- Args:
- content: The text to analyze.
-
- response_type: The structure of the response to return. `Entities` returns medical entities,
- `RxNorm` returns medication information, `ICD-10-CM` returns diagnosis codes,
- and `SNOMED CT` returns medical concepts.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/tools/comprehend/medical",
- body=await async_maybe_transform(
- {
- "content": content,
- "response_type": response_type,
- },
- comprehend_medical_params.ComprehendMedicalParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ComprehendMedicalResponse,
- )
-
-
-class ComprehendResourceWithRawResponse:
- def __init__(self, comprehend: ComprehendResource) -> None:
- self._comprehend = comprehend
-
- self.medical = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- comprehend.medical, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncComprehendResourceWithRawResponse:
- def __init__(self, comprehend: AsyncComprehendResource) -> None:
- self._comprehend = comprehend
-
- self.medical = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- comprehend.medical, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class ComprehendResourceWithStreamingResponse:
- def __init__(self, comprehend: ComprehendResource) -> None:
- self._comprehend = comprehend
-
- self.medical = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- comprehend.medical, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncComprehendResourceWithStreamingResponse:
- def __init__(self, comprehend: AsyncComprehendResource) -> None:
- self._comprehend = comprehend
-
- self.medical = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- comprehend.medical, # pyright: ignore[reportDeprecated],
- )
- )
diff --git a/src/writerai/types/__init__.py b/src/writerai/types/__init__.py
index 3ded6249..195d442a 100644
--- a/src/writerai/types/__init__.py
+++ b/src/writerai/types/__init__.py
@@ -41,7 +41,6 @@
from .graph_delete_response import GraphDeleteResponse as GraphDeleteResponse
from .graph_question_params import GraphQuestionParams as GraphQuestionParams
from .graph_update_response import GraphUpdateResponse as GraphUpdateResponse
-from .tool_ai_detect_params import ToolAIDetectParams as ToolAIDetectParams
from .tool_parse_pdf_params import ToolParsePdfParams as ToolParsePdfParams
from .vision_analyze_params import VisionAnalyzeParams as VisionAnalyzeParams
from .chat_completion_choice import ChatCompletionChoice as ChatCompletionChoice
@@ -49,7 +48,6 @@
from .application_list_params import ApplicationListParams as ApplicationListParams
from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
from .question_response_chunk import QuestionResponseChunk as QuestionResponseChunk
-from .tool_ai_detect_response import ToolAIDetectResponse as ToolAIDetectResponse
from .tool_parse_pdf_response import ToolParsePdfResponse as ToolParsePdfResponse
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .tool_web_search_response import ToolWebSearchResponse as ToolWebSearchResponse
@@ -59,11 +57,7 @@
from .graph_add_file_to_graph_params import GraphAddFileToGraphParams as GraphAddFileToGraphParams
from .application_generate_content_chunk import ApplicationGenerateContentChunk as ApplicationGenerateContentChunk
from .application_generate_content_params import ApplicationGenerateContentParams as ApplicationGenerateContentParams
-from .tool_context_aware_splitting_params import ToolContextAwareSplittingParams as ToolContextAwareSplittingParams
from .application_generate_content_response import (
ApplicationGenerateContentResponse as ApplicationGenerateContentResponse,
)
from .graph_remove_file_from_graph_response import GraphRemoveFileFromGraphResponse as GraphRemoveFileFromGraphResponse
-from .tool_context_aware_splitting_response import (
- ToolContextAwareSplittingResponse as ToolContextAwareSplittingResponse,
-)
diff --git a/src/writerai/types/tool_ai_detect_params.py b/src/writerai/types/tool_ai_detect_params.py
deleted file mode 100644
index e162d4c3..00000000
--- a/src/writerai/types/tool_ai_detect_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ToolAIDetectParams"]
-
-
-class ToolAIDetectParams(TypedDict, total=False):
- input: Required[str]
- """The content to determine if it is AI- or human-generated.
-
- Content must have at least 350 characters.
- """
diff --git a/src/writerai/types/tool_ai_detect_response.py b/src/writerai/types/tool_ai_detect_response.py
deleted file mode 100644
index 48052a29..00000000
--- a/src/writerai/types/tool_ai_detect_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ToolAIDetectResponse"]
-
-
-class ToolAIDetectResponse(BaseModel):
- label: Literal["fake", "real"]
-
- score: float
diff --git a/src/writerai/types/tool_context_aware_splitting_params.py b/src/writerai/types/tool_context_aware_splitting_params.py
deleted file mode 100644
index eb94d79d..00000000
--- a/src/writerai/types/tool_context_aware_splitting_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ToolContextAwareSplittingParams"]
-
-
-class ToolContextAwareSplittingParams(TypedDict, total=False):
- strategy: Required[Literal["llm_split", "fast_split", "hybrid_split"]]
- """The strategy to use for splitting the text into chunks.
-
- `llm_split` uses the language model to split the text, `fast_split` uses a fast
- heuristic-based approach, and `hybrid_split` combines both strategies.
- """
-
- text: Required[str]
- """The text to split into chunks."""
diff --git a/src/writerai/types/tool_context_aware_splitting_response.py b/src/writerai/types/tool_context_aware_splitting_response.py
deleted file mode 100644
index 74f3a773..00000000
--- a/src/writerai/types/tool_context_aware_splitting_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-
-__all__ = ["ToolContextAwareSplittingResponse"]
-
-
-class ToolContextAwareSplittingResponse(BaseModel):
- chunks: List[str]
- """
- An array of text chunks generated by splitting the input text based on the
- specified strategy.
- """
diff --git a/src/writerai/types/tools/__init__.py b/src/writerai/types/tools/__init__.py
deleted file mode 100644
index 23e03174..00000000
--- a/src/writerai/types/tools/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .comprehend_medical_params import ComprehendMedicalParams as ComprehendMedicalParams
-from .comprehend_medical_response import ComprehendMedicalResponse as ComprehendMedicalResponse
diff --git a/src/writerai/types/tools/comprehend_medical_params.py b/src/writerai/types/tools/comprehend_medical_params.py
deleted file mode 100644
index 6377654f..00000000
--- a/src/writerai/types/tools/comprehend_medical_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ComprehendMedicalParams"]
-
-
-class ComprehendMedicalParams(TypedDict, total=False):
- content: Required[str]
- """The text to analyze."""
-
- response_type: Required[Literal["Entities", "RxNorm", "ICD-10-CM", "SNOMED CT"]]
- """The structure of the response to return.
-
- `Entities` returns medical entities, `RxNorm` returns medication information,
- `ICD-10-CM` returns diagnosis codes, and `SNOMED CT` returns medical concepts.
- """
diff --git a/src/writerai/types/tools/comprehend_medical_response.py b/src/writerai/types/tools/comprehend_medical_response.py
deleted file mode 100644
index 9489f389..00000000
--- a/src/writerai/types/tools/comprehend_medical_response.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ..._models import BaseModel
-
-__all__ = [
- "ComprehendMedicalResponse",
- "Entity",
- "EntityAttribute",
- "EntityAttributeConcept",
- "EntityAttributeTrait",
- "EntityConcept",
- "EntityTrait",
-]
-
-
-class EntityAttributeConcept(BaseModel):
- code: str
-
- description: str
-
- score: float
-
-
-class EntityAttributeTrait(BaseModel):
- name: str
-
- score: float
-
-
-class EntityAttribute(BaseModel):
- begin_offset: int
-
- concepts: List[EntityAttributeConcept]
-
- end_offset: int
-
- relationship_score: float
-
- score: float
-
- text: str
-
- traits: List[EntityAttributeTrait]
-
- type: str
-
- category: Optional[str] = None
-
- relationship_type: Optional[str] = None
-
-
-class EntityConcept(BaseModel):
- code: str
-
- description: str
-
- score: float
-
-
-class EntityTrait(BaseModel):
- name: str
-
- score: float
-
-
-class Entity(BaseModel):
- attributes: List[EntityAttribute]
-
- begin_offset: int
-
- category: str
-
- concepts: List[EntityConcept]
-
- end_offset: int
-
- score: float
-
- text: str
-
- traits: List[EntityTrait]
-
- type: str
-
-
-class ComprehendMedicalResponse(BaseModel):
- entities: List[Entity]
- """An array of medical entities extracted from the input text."""
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
index b22112b3..8443bbc0 100644
--- a/tests/api_resources/test_files.py
+++ b/tests/api_resources/test_files.py
@@ -233,7 +233,7 @@ def test_streaming_response_retry(self, client: Writer) -> None:
@parametrize
def test_method_upload(self, client: Writer) -> None:
file = client.files.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
)
@@ -243,7 +243,7 @@ def test_method_upload(self, client: Writer) -> None:
@parametrize
def test_method_upload_with_all_params(self, client: Writer) -> None:
file = client.files.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
graph_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
@@ -254,7 +254,7 @@ def test_method_upload_with_all_params(self, client: Writer) -> None:
@parametrize
def test_raw_response_upload(self, client: Writer) -> None:
response = client.files.with_raw_response.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
)
@@ -268,7 +268,7 @@ def test_raw_response_upload(self, client: Writer) -> None:
@parametrize
def test_streaming_response_upload(self, client: Writer) -> None:
with client.files.with_streaming_response.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
) as response:
@@ -489,7 +489,7 @@ async def test_streaming_response_retry(self, async_client: AsyncWriter) -> None
@parametrize
async def test_method_upload(self, async_client: AsyncWriter) -> None:
file = await async_client.files.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
)
@@ -499,7 +499,7 @@ async def test_method_upload(self, async_client: AsyncWriter) -> None:
@parametrize
async def test_method_upload_with_all_params(self, async_client: AsyncWriter) -> None:
file = await async_client.files.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
graph_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
@@ -510,7 +510,7 @@ async def test_method_upload_with_all_params(self, async_client: AsyncWriter) ->
@parametrize
async def test_raw_response_upload(self, async_client: AsyncWriter) -> None:
response = await async_client.files.with_raw_response.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
)
@@ -524,7 +524,7 @@ async def test_raw_response_upload(self, async_client: AsyncWriter) -> None:
@parametrize
async def test_streaming_response_upload(self, async_client: AsyncWriter) -> None:
async with async_client.files.with_streaming_response.upload(
- content=b"raw file contents",
+ content=b"Example data",
content_disposition="Content-Disposition",
content_type="Content-Type",
) as response:
diff --git a/tests/api_resources/test_tools.py b/tests/api_resources/test_tools.py
index 971657ee..8e0bd7a7 100644
--- a/tests/api_resources/test_tools.py
+++ b/tests/api_resources/test_tools.py
@@ -9,12 +9,7 @@
from writerai import Writer, AsyncWriter
from tests.utils import assert_matches_type
-from writerai.types import (
- ToolAIDetectResponse,
- ToolParsePdfResponse,
- ToolWebSearchResponse,
- ToolContextAwareSplittingResponse,
-)
+from writerai.types import ToolParsePdfResponse, ToolWebSearchResponse
# pyright: reportDeprecated=false
@@ -24,79 +19,6 @@
class TestTools:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
- @parametrize
- def test_method_ai_detect(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- tool = client.tools.ai_detect(
- input="AI and ML continue to be at the forefront of technological advancements. In 2025, we can expect more sophisticated AI systems that can handle complex tasks with greater efficiency. AI will play a crucial role in various sectors, including healthcare, finance, and manufacturing. For instance, AI-powered diagnostic tools will become more accurate, helping doctors detect diseases at an early stage. In finance, AI algorithms will enhance fraud detection and risk management.",
- )
-
- assert_matches_type(ToolAIDetectResponse, tool, path=["response"])
-
- @parametrize
- def test_raw_response_ai_detect(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tools.with_raw_response.ai_detect(
- input="AI and ML continue to be at the forefront of technological advancements. In 2025, we can expect more sophisticated AI systems that can handle complex tasks with greater efficiency. AI will play a crucial role in various sectors, including healthcare, finance, and manufacturing. For instance, AI-powered diagnostic tools will become more accurate, helping doctors detect diseases at an early stage. In finance, AI algorithms will enhance fraud detection and risk management.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = response.parse()
- assert_matches_type(ToolAIDetectResponse, tool, path=["response"])
-
- @parametrize
- def test_streaming_response_ai_detect(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tools.with_streaming_response.ai_detect(
- input="AI and ML continue to be at the forefront of technological advancements. In 2025, we can expect more sophisticated AI systems that can handle complex tasks with greater efficiency. AI will play a crucial role in various sectors, including healthcare, finance, and manufacturing. For instance, AI-powered diagnostic tools will become more accurate, helping doctors detect diseases at an early stage. In finance, AI algorithms will enhance fraud detection and risk management.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = response.parse()
- assert_matches_type(ToolAIDetectResponse, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_context_aware_splitting(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- tool = client.tools.context_aware_splitting(
- strategy="llm_split",
- text="text",
- )
-
- assert_matches_type(ToolContextAwareSplittingResponse, tool, path=["response"])
-
- @parametrize
- def test_raw_response_context_aware_splitting(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tools.with_raw_response.context_aware_splitting(
- strategy="llm_split",
- text="text",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = response.parse()
- assert_matches_type(ToolContextAwareSplittingResponse, tool, path=["response"])
-
- @parametrize
- def test_streaming_response_context_aware_splitting(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tools.with_streaming_response.context_aware_splitting(
- strategy="llm_split",
- text="text",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = response.parse()
- assert_matches_type(ToolContextAwareSplittingResponse, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
@parametrize
def test_method_parse_pdf(self, client: Writer) -> None:
with pytest.warns(DeprecationWarning):
@@ -200,79 +122,6 @@ class TestAsyncTools:
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
- @parametrize
- async def test_method_ai_detect(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- tool = await async_client.tools.ai_detect(
- input="AI and ML continue to be at the forefront of technological advancements. In 2025, we can expect more sophisticated AI systems that can handle complex tasks with greater efficiency. AI will play a crucial role in various sectors, including healthcare, finance, and manufacturing. For instance, AI-powered diagnostic tools will become more accurate, helping doctors detect diseases at an early stage. In finance, AI algorithms will enhance fraud detection and risk management.",
- )
-
- assert_matches_type(ToolAIDetectResponse, tool, path=["response"])
-
- @parametrize
- async def test_raw_response_ai_detect(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tools.with_raw_response.ai_detect(
- input="AI and ML continue to be at the forefront of technological advancements. In 2025, we can expect more sophisticated AI systems that can handle complex tasks with greater efficiency. AI will play a crucial role in various sectors, including healthcare, finance, and manufacturing. For instance, AI-powered diagnostic tools will become more accurate, helping doctors detect diseases at an early stage. In finance, AI algorithms will enhance fraud detection and risk management.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = await response.parse()
- assert_matches_type(ToolAIDetectResponse, tool, path=["response"])
-
- @parametrize
- async def test_streaming_response_ai_detect(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tools.with_streaming_response.ai_detect(
- input="AI and ML continue to be at the forefront of technological advancements. In 2025, we can expect more sophisticated AI systems that can handle complex tasks with greater efficiency. AI will play a crucial role in various sectors, including healthcare, finance, and manufacturing. For instance, AI-powered diagnostic tools will become more accurate, helping doctors detect diseases at an early stage. In finance, AI algorithms will enhance fraud detection and risk management.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = await response.parse()
- assert_matches_type(ToolAIDetectResponse, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_context_aware_splitting(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- tool = await async_client.tools.context_aware_splitting(
- strategy="llm_split",
- text="text",
- )
-
- assert_matches_type(ToolContextAwareSplittingResponse, tool, path=["response"])
-
- @parametrize
- async def test_raw_response_context_aware_splitting(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tools.with_raw_response.context_aware_splitting(
- strategy="llm_split",
- text="text",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = await response.parse()
- assert_matches_type(ToolContextAwareSplittingResponse, tool, path=["response"])
-
- @parametrize
- async def test_streaming_response_context_aware_splitting(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tools.with_streaming_response.context_aware_splitting(
- strategy="llm_split",
- text="text",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = await response.parse()
- assert_matches_type(ToolContextAwareSplittingResponse, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
@parametrize
async def test_method_parse_pdf(self, async_client: AsyncWriter) -> None:
with pytest.warns(DeprecationWarning):
diff --git a/tests/api_resources/tools/__init__.py b/tests/api_resources/tools/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/tools/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/tools/test_comprehend.py b/tests/api_resources/tools/test_comprehend.py
deleted file mode 100644
index 59e9dd9f..00000000
--- a/tests/api_resources/tools/test_comprehend.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from writerai import Writer, AsyncWriter
-from tests.utils import assert_matches_type
-from writerai.types.tools import ComprehendMedicalResponse
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestComprehend:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_medical(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- comprehend = client.tools.comprehend.medical(
- content="content",
- response_type="Entities",
- )
-
- assert_matches_type(ComprehendMedicalResponse, comprehend, path=["response"])
-
- @parametrize
- def test_raw_response_medical(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tools.comprehend.with_raw_response.medical(
- content="content",
- response_type="Entities",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- comprehend = response.parse()
- assert_matches_type(ComprehendMedicalResponse, comprehend, path=["response"])
-
- @parametrize
- def test_streaming_response_medical(self, client: Writer) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tools.comprehend.with_streaming_response.medical(
- content="content",
- response_type="Entities",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- comprehend = response.parse()
- assert_matches_type(ComprehendMedicalResponse, comprehend, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncComprehend:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_medical(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- comprehend = await async_client.tools.comprehend.medical(
- content="content",
- response_type="Entities",
- )
-
- assert_matches_type(ComprehendMedicalResponse, comprehend, path=["response"])
-
- @parametrize
- async def test_raw_response_medical(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tools.comprehend.with_raw_response.medical(
- content="content",
- response_type="Entities",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- comprehend = await response.parse()
- assert_matches_type(ComprehendMedicalResponse, comprehend, path=["response"])
-
- @parametrize
- async def test_streaming_response_medical(self, async_client: AsyncWriter) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tools.comprehend.with_streaming_response.medical(
- content="content",
- response_type="Entities",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- comprehend = await response.parse()
- assert_matches_type(ComprehendMedicalResponse, comprehend, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/test_client.py b/tests/test_client.py
index f90d3a04..2b13f897 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -8,10 +8,11 @@
import json
import asyncio
import inspect
+import dataclasses
import tracemalloc
-from typing import Any, Union, cast
+from typing import Any, Union, TypeVar, Callable, Iterable, Iterator, Optional, Coroutine, cast
from unittest import mock
-from typing_extensions import Literal
+from typing_extensions import Literal, AsyncIterator, override
import httpx
import pytest
@@ -37,6 +38,7 @@
from .utils import update_env
+T = TypeVar("T")
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
@@ -51,6 +53,57 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float:
return 0.1
+def mirror_request_content(request: httpx.Request) -> httpx.Response:
+ return httpx.Response(200, content=request.content)
+
+
+# note: we can't use the httpx.MockTransport class as it consumes the request
+# body itself, which means we can't test that the body is read lazily
+class MockTransport(httpx.BaseTransport, httpx.AsyncBaseTransport):
+ def __init__(
+ self,
+ handler: Callable[[httpx.Request], httpx.Response]
+ | Callable[[httpx.Request], Coroutine[Any, Any, httpx.Response]],
+ ) -> None:
+ self.handler = handler
+
+ @override
+ def handle_request(
+ self,
+ request: httpx.Request,
+ ) -> httpx.Response:
+ assert not inspect.iscoroutinefunction(self.handler), "handler must not be a coroutine function"
+ assert inspect.isfunction(self.handler), "handler must be a function"
+ return self.handler(request)
+
+ @override
+ async def handle_async_request(
+ self,
+ request: httpx.Request,
+ ) -> httpx.Response:
+ assert inspect.iscoroutinefunction(self.handler), "handler must be a coroutine function"
+ return await self.handler(request)
+
+
+@dataclasses.dataclass
+class Counter:
+ value: int = 0
+
+
+def _make_sync_iterator(iterable: Iterable[T], counter: Optional[Counter] = None) -> Iterator[T]:
+ for item in iterable:
+ if counter:
+ counter.value += 1
+ yield item
+
+
+async def _make_async_iterator(iterable: Iterable[T], counter: Optional[Counter] = None) -> AsyncIterator[T]:
+ for item in iterable:
+ if counter:
+ counter.value += 1
+ yield item
+
+
def _get_open_connections(client: Writer | AsyncWriter) -> int:
transport = client._client._transport
assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
@@ -375,6 +428,30 @@ def test_default_query_option(self) -> None:
client.close()
+ def test_hardcoded_query_params_in_url(self, client: Writer) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Writer) -> None:
request = client._build_request(
FinalRequestOptions(
@@ -501,6 +578,70 @@ def test_multipart_repeating_array(self, client: Writer) -> None:
b"",
]
+ @pytest.mark.respx(base_url=base_url)
+ def test_binary_content_upload(self, respx_mock: MockRouter, client: Writer) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ response = client.post(
+ "/upload",
+ content=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ def test_binary_content_upload_with_iterator(self) -> None:
+ file_content = b"Hello, this is a test file."
+ counter = Counter()
+ iterator = _make_sync_iterator([file_content], counter=counter)
+
+ def mock_handler(request: httpx.Request) -> httpx.Response:
+ assert counter.value == 0, "the request body should not have been read"
+ return httpx.Response(200, content=request.read())
+
+ with Writer(
+ base_url=base_url,
+ api_key=api_key,
+ _strict_response_validation=True,
+ http_client=httpx.Client(transport=MockTransport(handler=mock_handler)),
+ ) as client:
+ response = client.post(
+ "/upload",
+ content=iterator,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+ assert counter.value == 1
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_binary_content_upload_with_body_is_deprecated(self, respx_mock: MockRouter, client: Writer) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ with pytest.deprecated_call(
+ match="Passing raw bytes as `body` is deprecated and will be removed in a future version. Please pass raw bytes via the `content` parameter instead."
+ ):
+ response = client.post(
+ "/upload",
+ body=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
@pytest.mark.respx(base_url=base_url)
def test_basic_union_response(self, respx_mock: MockRouter, client: Writer) -> None:
class Model1(BaseModel):
@@ -844,6 +985,14 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+ # Delete in case our environment has any proxy env vars set
+ monkeypatch.delenv("HTTP_PROXY", raising=False)
+ monkeypatch.delenv("ALL_PROXY", raising=False)
+ monkeypatch.delenv("NO_PROXY", raising=False)
+ monkeypatch.delenv("http_proxy", raising=False)
+ monkeypatch.delenv("https_proxy", raising=False)
+ monkeypatch.delenv("all_proxy", raising=False)
+ monkeypatch.delenv("no_proxy", raising=False)
client = DefaultHttpxClient()
@@ -1209,6 +1358,30 @@ async def test_default_query_option(self) -> None:
await client.close()
+ async def test_hardcoded_query_params_in_url(self, async_client: AsyncWriter) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Writer) -> None:
request = client._build_request(
FinalRequestOptions(
@@ -1335,6 +1508,72 @@ def test_multipart_repeating_array(self, async_client: AsyncWriter) -> None:
b"",
]
+ @pytest.mark.respx(base_url=base_url)
+ async def test_binary_content_upload(self, respx_mock: MockRouter, async_client: AsyncWriter) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ response = await async_client.post(
+ "/upload",
+ content=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ async def test_binary_content_upload_with_asynciterator(self) -> None:
+ file_content = b"Hello, this is a test file."
+ counter = Counter()
+ iterator = _make_async_iterator([file_content], counter=counter)
+
+ async def mock_handler(request: httpx.Request) -> httpx.Response:
+ assert counter.value == 0, "the request body should not have been read"
+ return httpx.Response(200, content=await request.aread())
+
+ async with AsyncWriter(
+ base_url=base_url,
+ api_key=api_key,
+ _strict_response_validation=True,
+ http_client=httpx.AsyncClient(transport=MockTransport(handler=mock_handler)),
+ ) as client:
+ response = await client.post(
+ "/upload",
+ content=iterator,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+ assert counter.value == 1
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_binary_content_upload_with_body_is_deprecated(
+ self, respx_mock: MockRouter, async_client: AsyncWriter
+ ) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ with pytest.deprecated_call(
+ match="Passing raw bytes as `body` is deprecated and will be removed in a future version. Please pass raw bytes via the `content` parameter instead."
+ ):
+ response = await async_client.post(
+ "/upload",
+ body=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
@pytest.mark.respx(base_url=base_url)
async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncWriter) -> None:
class Model1(BaseModel):
@@ -1699,6 +1938,14 @@ async def test_get_platform(self) -> None:
async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+ # Delete in case our environment has any proxy env vars set
+ monkeypatch.delenv("HTTP_PROXY", raising=False)
+ monkeypatch.delenv("ALL_PROXY", raising=False)
+ monkeypatch.delenv("NO_PROXY", raising=False)
+ monkeypatch.delenv("http_proxy", raising=False)
+ monkeypatch.delenv("https_proxy", raising=False)
+ monkeypatch.delenv("all_proxy", raising=False)
+ monkeypatch.delenv("no_proxy", raising=False)
client = DefaultAsyncHttpxClient()
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index 9d9a4b19..436354bf 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -35,6 +35,15 @@ def test_multiple_files() -> None:
assert query == {"documents": [{}, {}]}
+def test_top_level_file_array() -> None:
+ query = {"files": [b"file one", b"file two"], "title": "hello"}
+ assert extract_files(query, paths=[["files", ""]]) == [
+ ("files[]", b"file one"),
+ ("files[]", b"file two"),
+ ]
+ assert query == {"title": "hello"}
+
+
@pytest.mark.parametrize(
"query,paths,expected",
[
diff --git a/tests/test_utils/test_json.py b/tests/test_utils/test_json.py
new file mode 100644
index 00000000..9d63c901
--- /dev/null
+++ b/tests/test_utils/test_json.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import datetime
+from typing import Union
+
+import pydantic
+
+from writerai import _compat
+from writerai._utils._json import openapi_dumps
+
+
+class TestOpenapiDumps:
+ def test_basic(self) -> None:
+ data = {"key": "value", "number": 42}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"key":"value","number":42}'
+
+ def test_datetime_serialization(self) -> None:
+ dt = datetime.datetime(2023, 1, 1, 12, 0, 0)
+ data = {"datetime": dt}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"datetime":"2023-01-01T12:00:00"}'
+
+ def test_pydantic_model_serialization(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str
+ last_name: str
+ age: int
+
+ model_instance = User(first_name="John", last_name="Kramer", age=83)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"first_name":"John","last_name":"Kramer","age":83}}'
+
+ def test_pydantic_model_with_default_values(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+ score: int = 0
+
+ model_instance = User(name="Alice")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Alice"}}'
+
+ def test_pydantic_model_with_default_values_overridden(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+
+ model_instance = User(name="Bob", role="admin", active=False)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Bob","role":"admin","active":false}}'
+
+ def test_pydantic_model_with_alias(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str = pydantic.Field(alias="firstName")
+ last_name: str = pydantic.Field(alias="lastName")
+
+ model_instance = User(firstName="John", lastName="Doe")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"firstName":"John","lastName":"Doe"}}'
+
+ def test_pydantic_model_with_alias_and_default(self) -> None:
+ class User(pydantic.BaseModel):
+ user_name: str = pydantic.Field(alias="userName")
+ user_role: str = pydantic.Field(default="member", alias="userRole")
+ is_active: bool = pydantic.Field(default=True, alias="isActive")
+
+ model_instance = User(userName="charlie")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"charlie"}}'
+
+ model_with_overrides = User(userName="diana", userRole="admin", isActive=False)
+ data = {"model": model_with_overrides}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"diana","userRole":"admin","isActive":false}}'
+
+ def test_pydantic_model_with_nested_models_and_defaults(self) -> None:
+ class Address(pydantic.BaseModel):
+ street: str
+ city: str = "Unknown"
+
+ class User(pydantic.BaseModel):
+ name: str
+ address: Address
+ verified: bool = False
+
+ if _compat.PYDANTIC_V1:
+ # to handle forward references in Pydantic v1
+ User.update_forward_refs(**locals()) # type: ignore[reportDeprecated]
+
+ address = Address(street="123 Main St")
+ user = User(name="Diana", address=address)
+ data = {"user": user}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"user":{"name":"Diana","address":{"street":"123 Main St"}}}'
+
+ address_with_city = Address(street="456 Oak Ave", city="Boston")
+ user_verified = User(name="Eve", address=address_with_city, verified=True)
+ data = {"user": user_verified}
+ json_bytes = openapi_dumps(data)
+ assert (
+ json_bytes == b'{"user":{"name":"Eve","address":{"street":"456 Oak Ave","city":"Boston"},"verified":true}}'
+ )
+
+ def test_pydantic_model_with_optional_fields(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ email: Union[str, None]
+ phone: Union[str, None]
+
+ model_with_none = User(name="Eve", email=None, phone=None)
+ data = {"model": model_with_none}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Eve","email":null,"phone":null}}'
+
+ model_with_values = User(name="Frank", email="frank@example.com", phone=None)
+ data = {"model": model_with_values}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Frank","email":"frank@example.com","phone":null}}'
diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py
new file mode 100644
index 00000000..b42e3d87
--- /dev/null
+++ b/tests/test_utils/test_path.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+from writerai._utils._path import path_template
+
+
+@pytest.mark.parametrize(
+    "template, kwargs, expected",
+    [
+        ("/v1/{id}", dict(id="abc"), "/v1/abc"),
+        ("/v1/{a}/{b}", dict(a="x", b="y"), "/v1/x/y"),
+        ("/v1/{a}{b}/path/{c}?val={d}#{e}", dict(a="x", b="y", c="z", d="u", e="v"), "/v1/xy/path/z?val=u#v"),
+        ("/{w}/{w}", dict(w="echo"), "/echo/echo"),  # same variable may appear more than once
+        ("/v1/static", {}, "/v1/static"),  # no placeholders — passthrough
+        ("", {}, ""),  # empty template
+        ("/v1/?q={n}&count=10", dict(n=42), "/v1/?q=42&count=10"),  # non-string values are stringified
+        ("/v1/{v}", dict(v=None), "/v1/null"),  # None renders as JSON-style null
+        ("/v1/{v}", dict(v=True), "/v1/true"),  # booleans render lowercase
+        ("/v1/{v}", dict(v=False), "/v1/false"),
+        ("/v1/{v}", dict(v=".hidden"), "/v1/.hidden"),  # dot prefix ok
+        ("/v1/{v}", dict(v="file.txt"), "/v1/file.txt"),  # dot in middle ok
+        ("/v1/{v}", dict(v="..."), "/v1/..."),  # triple dot ok
+        ("/v1/{a}{b}", dict(a=".", b="txt"), "/v1/.txt"),  # dot merged with an adjacent var yields ".txt" — not a dot-segment, so ok
+        ("/items?q={v}#{f}", dict(v=".", f=".."), "/items?q=.#.."),  # dots in query/fragment are fine
+        (
+            "/v1/{a}?query={b}",
+            dict(a="../../other/endpoint", b="a&bad=true"),
+            "/v1/..%2F..%2Fother%2Fendpoint?query=a%26bad%3Dtrue",
+        ),
+        ("/v1/{val}", dict(val="a/b/c"), "/v1/a%2Fb%2Fc"),
+        ("/v1/{val}", dict(val="a/b/c?query=value"), "/v1/a%2Fb%2Fc%3Fquery=value"),
+        ("/v1/{val}", dict(val="a/b/c?query=value&bad=true"), "/v1/a%2Fb%2Fc%3Fquery=value&bad=true"),
+        ("/v1/{val}", dict(val="%20"), "/v1/%2520"),  # a literal '%' in input is itself percent-encoded (no double-decoding)
+        # Query: slash and ? are safe, # is not
+        ("/items?q={v}", dict(v="a/b"), "/items?q=a/b"),
+        ("/items?q={v}", dict(v="a?b"), "/items?q=a?b"),
+        ("/items?q={v}", dict(v="a#b"), "/items?q=a%23b"),
+        ("/items?q={v}", dict(v="a b"), "/items?q=a%20b"),
+        # Fragment: slash and ? are safe
+        ("/docs#{v}", dict(v="a/b"), "/docs#a/b"),
+        ("/docs#{v}", dict(v="a?b"), "/docs#a?b"),
+        # Path: slash, ? and # are all encoded
+        ("/v1/{v}", dict(v="a/b"), "/v1/a%2Fb"),
+        ("/v1/{v}", dict(v="a?b"), "/v1/a%3Fb"),
+        ("/v1/{v}", dict(v="a#b"), "/v1/a%23b"),
+        # same var encoded differently by component
+        (
+            "/v1/{v}?q={v}#{v}",
+            dict(v="a/b?c#d"),
+            "/v1/a%2Fb%3Fc%23d?q=a/b?c%23d#a/b?c%23d",
+        ),
+        ("/v1/{val}", dict(val="x?admin=true"), "/v1/x%3Fadmin=true"),  # query injection
+        ("/v1/{val}", dict(val="x#admin"), "/v1/x%23admin"),  # fragment injection
+    ],
+)
+def test_interpolation(template: str, kwargs: dict[str, Any], expected: str) -> None:
+    assert path_template(template, **kwargs) == expected
+
+
+def test_missing_kwarg_raises_key_error() -> None:
+    with pytest.raises(KeyError, match="org_id"):  # error message names the missing template variable
+        path_template("/v1/{org_id}")
+
+
+@pytest.mark.parametrize(
+    "template, kwargs",
+    [
+        ("{a}/path", dict(a=".")),  # "." as the leading path segment
+        ("{a}/path", dict(a="..")),
+        ("/v1/{a}", dict(a=".")),  # "." as the trailing path segment
+        ("/v1/{a}", dict(a="..")),
+        ("/v1/{a}/path", dict(a=".")),  # "." as an interior path segment
+        ("/v1/{a}/path", dict(a="..")),
+        ("/v1/{a}{b}", dict(a=".", b=".")),  # adjacent vars → ".."
+        ("/v1/{a}.", dict(a=".")),  # var + static → ".."
+        ("/v1/{a}{b}", dict(a="", b=".")),  # empty + dot → "."
+        ("/v1/%2e/{x}", dict(x="ok")),  # encoded dot in static text
+        ("/v1/%2e./{x}", dict(x="ok")),  # mixed encoded ".." in static
+        ("/v1/.%2E/{x}", dict(x="ok")),  # mixed encoded ".." in static
+        ("/v1/{v}?q=1", dict(v="..")),  # ".." in path still rejected when a query follows
+        ("/v1/{v}#frag", dict(v="..")),  # ".." in path still rejected when a fragment follows
+    ],
+)
+def test_dot_segment_rejected(template: str, kwargs: dict[str, Any]) -> None:
+    with pytest.raises(ValueError, match="dot-segment"):  # path traversal via "." / ".." must raise, not resolve
+        path_template(template, **kwargs)