From 3eb7e37e181a35de63e6de8d8bba39f84bb52018 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 12:56:15 +0200 Subject: [PATCH 01/15] feat(ai-utils): add @tanstack/ai-utils package with shared utilities Introduces the @tanstack/ai-utils package providing shared, zero-dependency utilities (generateId, getApiKeyFromEnv, transformNullsToUndefined, defineModelMeta) for use across TanStack AI adapter packages. --- packages/typescript/ai-utils/package.json | 44 +++++++++++ packages/typescript/ai-utils/src/env.ts | 23 ++++++ packages/typescript/ai-utils/src/id.ts | 5 ++ packages/typescript/ai-utils/src/index.ts | 5 ++ .../ai-utils/src/model-meta/define.ts | 47 +++++++++++ .../ai-utils/src/model-meta/types.ts | 20 +++++ .../typescript/ai-utils/src/transforms.ts | 22 ++++++ .../typescript/ai-utils/tests/env.test.ts | 26 +++++++ packages/typescript/ai-utils/tests/id.test.ts | 20 +++++ .../ai-utils/tests/model-meta.test.ts | 78 +++++++++++++++++++ .../ai-utils/tests/transforms.test.ts | 48 ++++++++++++ packages/typescript/ai-utils/tsconfig.json | 9 +++ packages/typescript/ai-utils/vite.config.ts | 36 +++++++++ pnpm-lock.yaml | 9 +++ 14 files changed, 392 insertions(+) create mode 100644 packages/typescript/ai-utils/package.json create mode 100644 packages/typescript/ai-utils/src/env.ts create mode 100644 packages/typescript/ai-utils/src/id.ts create mode 100644 packages/typescript/ai-utils/src/index.ts create mode 100644 packages/typescript/ai-utils/src/model-meta/define.ts create mode 100644 packages/typescript/ai-utils/src/model-meta/types.ts create mode 100644 packages/typescript/ai-utils/src/transforms.ts create mode 100644 packages/typescript/ai-utils/tests/env.test.ts create mode 100644 packages/typescript/ai-utils/tests/id.test.ts create mode 100644 packages/typescript/ai-utils/tests/model-meta.test.ts create mode 100644 packages/typescript/ai-utils/tests/transforms.test.ts create mode 100644 packages/typescript/ai-utils/tsconfig.json create mode 100644 
packages/typescript/ai-utils/vite.config.ts diff --git a/packages/typescript/ai-utils/package.json b/packages/typescript/ai-utils/package.json new file mode 100644 index 000000000..996184c49 --- /dev/null +++ b/packages/typescript/ai-utils/package.json @@ -0,0 +1,44 @@ +{ + "name": "@tanstack/ai-utils", + "version": "0.1.0", + "description": "Shared utilities for TanStack AI adapter packages", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-utils" + }, + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "utils", + "tanstack" + ], + "devDependencies": { + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7" + } +} diff --git a/packages/typescript/ai-utils/src/env.ts b/packages/typescript/ai-utils/src/env.ts new file mode 100644 index 000000000..23cb9bae2 --- /dev/null +++ b/packages/typescript/ai-utils/src/env.ts @@ -0,0 +1,23 @@ +export function getApiKeyFromEnv(envVarName: string): string { + let apiKey: string | undefined + + if (typeof process !== 'undefined' && process.env) { + apiKey = process.env[envVarName] + } + + if ( + !apiKey && + typeof window !== 'undefined' && + (window as any).env + ) { + apiKey = (window as any).env[envVarName] + } + + if (!apiKey) { + throw new Error( + `${envVarName} is not set. 
Please set the ${envVarName} environment variable or pass the API key directly.` + ) + } + + return apiKey +} diff --git a/packages/typescript/ai-utils/src/id.ts b/packages/typescript/ai-utils/src/id.ts new file mode 100644 index 000000000..03ace4128 --- /dev/null +++ b/packages/typescript/ai-utils/src/id.ts @@ -0,0 +1,5 @@ +export function generateId(prefix: string): string { + const timestamp = Date.now() + const randomPart = Math.random().toString(36).substring(2, 10) + return `${prefix}-${timestamp}-${randomPart}` +} diff --git a/packages/typescript/ai-utils/src/index.ts b/packages/typescript/ai-utils/src/index.ts new file mode 100644 index 000000000..160149686 --- /dev/null +++ b/packages/typescript/ai-utils/src/index.ts @@ -0,0 +1,5 @@ +export { generateId } from './id' +export { getApiKeyFromEnv } from './env' +export { transformNullsToUndefined } from './transforms' +export type { ModelMeta, Modality } from './model-meta/types' +export { defineModelMeta } from './model-meta/define' diff --git a/packages/typescript/ai-utils/src/model-meta/define.ts b/packages/typescript/ai-utils/src/model-meta/define.ts new file mode 100644 index 000000000..a87fb73ee --- /dev/null +++ b/packages/typescript/ai-utils/src/model-meta/define.ts @@ -0,0 +1,47 @@ +import type { ModelMeta } from './types' + +export function defineModelMeta(meta: T): T { + if (meta.supports.input.length === 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" must have at least one input modality` + ) + } + + if (meta.supports.output.length === 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" must have at least one output modality` + ) + } + + if (meta.context_window !== undefined && meta.context_window <= 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" context_window must be positive` + ) + } + + if (meta.max_output_tokens !== undefined && meta.max_output_tokens <= 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" max_output_tokens must be 
positive` + ) + } + + if (meta.pricing) { + if (meta.pricing.input.normal < 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" pricing.input.normal must be non-negative` + ) + } + if (meta.pricing.input.cached !== undefined && meta.pricing.input.cached < 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" pricing.input.cached must be non-negative` + ) + } + if (meta.pricing.output.normal < 0) { + throw new Error( + `defineModelMeta: model "${meta.name}" pricing.output.normal must be non-negative` + ) + } + } + + return meta +} diff --git a/packages/typescript/ai-utils/src/model-meta/types.ts b/packages/typescript/ai-utils/src/model-meta/types.ts new file mode 100644 index 000000000..24ba56b28 --- /dev/null +++ b/packages/typescript/ai-utils/src/model-meta/types.ts @@ -0,0 +1,20 @@ +export type Modality = 'text' | 'image' | 'audio' | 'video' | 'document' + +export interface ModelMeta { + name: string + supports: { + input: Array + output: Array + endpoints?: Array + features?: Array + tools?: Array + } + context_window?: number + max_output_tokens?: number + knowledge_cutoff?: string + pricing?: { + input: { normal: number; cached?: number } + output: { normal: number } + } + providerOptions?: TProviderOptions +} diff --git a/packages/typescript/ai-utils/src/transforms.ts b/packages/typescript/ai-utils/src/transforms.ts new file mode 100644 index 000000000..1496259c5 --- /dev/null +++ b/packages/typescript/ai-utils/src/transforms.ts @@ -0,0 +1,22 @@ +export function transformNullsToUndefined(obj: T): T { + if (obj === null) { + return undefined as unknown as T + } + + if (typeof obj !== 'object') { + return obj + } + + if (Array.isArray(obj)) { + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + } + + const result: Record = {} + for (const [key, value] of Object.entries(obj as Record)) { + if (value === null) { + continue + } + result[key] = transformNullsToUndefined(value) + } + return result as T +} diff --git 
a/packages/typescript/ai-utils/tests/env.test.ts b/packages/typescript/ai-utils/tests/env.test.ts new file mode 100644 index 000000000..24c6cffd9 --- /dev/null +++ b/packages/typescript/ai-utils/tests/env.test.ts @@ -0,0 +1,26 @@ +import { describe, it, expect, vi, afterEach } from 'vitest' +import { getApiKeyFromEnv } from '../src/env' + +describe('getApiKeyFromEnv', () => { + afterEach(() => { + vi.unstubAllEnvs() + }) + + it('should return the API key from process.env', () => { + vi.stubEnv('TEST_API_KEY', 'sk-test-123') + expect(getApiKeyFromEnv('TEST_API_KEY')).toBe('sk-test-123') + }) + + it('should throw if the env var is not set', () => { + expect(() => getApiKeyFromEnv('NONEXISTENT_KEY')).toThrow('NONEXISTENT_KEY') + }) + + it('should throw if the env var is empty string', () => { + vi.stubEnv('EMPTY_KEY', '') + expect(() => getApiKeyFromEnv('EMPTY_KEY')).toThrow('EMPTY_KEY') + }) + + it('should include the env var name in the error message', () => { + expect(() => getApiKeyFromEnv('MY_PROVIDER_API_KEY')).toThrow('MY_PROVIDER_API_KEY') + }) +}) diff --git a/packages/typescript/ai-utils/tests/id.test.ts b/packages/typescript/ai-utils/tests/id.test.ts new file mode 100644 index 000000000..74fe0d198 --- /dev/null +++ b/packages/typescript/ai-utils/tests/id.test.ts @@ -0,0 +1,20 @@ +import { describe, it, expect } from 'vitest' +import { generateId } from '../src/id' + +describe('generateId', () => { + it('should generate an id with the given prefix', () => { + const id = generateId('run') + expect(id).toMatch(/^run-\d+-[a-z0-9]+$/) + }) + + it('should generate unique ids', () => { + const id1 = generateId('msg') + const id2 = generateId('msg') + expect(id1).not.toBe(id2) + }) + + it('should use the prefix exactly as given', () => { + const id = generateId('tool_call') + expect(id.startsWith('tool_call-')).toBe(true) + }) +}) diff --git a/packages/typescript/ai-utils/tests/model-meta.test.ts b/packages/typescript/ai-utils/tests/model-meta.test.ts new file mode 
100644 index 000000000..207cc863c --- /dev/null +++ b/packages/typescript/ai-utils/tests/model-meta.test.ts @@ -0,0 +1,78 @@ +import { describe, it, expect } from 'vitest' +import { defineModelMeta } from '../src/model-meta/define' +import type { ModelMeta, Modality } from '../src/model-meta/types' + +describe('defineModelMeta', () => { + it('should return the meta object unchanged for valid input', () => { + const meta = defineModelMeta({ + name: 'gpt-4o', + supports: { + input: ['text', 'image'] as Array, + output: ['text'] as Array, + }, + }) + expect(meta.name).toBe('gpt-4o') + expect(meta.supports.input).toEqual(['text', 'image']) + }) + + it('should accept optional fields', () => { + const meta = defineModelMeta({ + name: 'gpt-4o', + supports: { + input: ['text'] as Array, + output: ['text'] as Array, + features: ['streaming', 'function_calling'], + }, + context_window: 128000, + max_output_tokens: 16384, + knowledge_cutoff: '2024-10', + pricing: { + input: { normal: 2.5, cached: 1.25 }, + output: { normal: 10.0 }, + }, + }) + expect(meta.context_window).toBe(128000) + expect(meta.pricing?.input.cached).toBe(1.25) + }) + + it('should throw for negative pricing', () => { + expect(() => + defineModelMeta({ + name: 'test', + supports: { + input: ['text'] as Array, + output: ['text'] as Array, + }, + pricing: { + input: { normal: -1 }, + output: { normal: 1 }, + }, + }) + ).toThrow('pricing') + }) + + it('should throw for zero context window', () => { + expect(() => + defineModelMeta({ + name: 'test', + supports: { + input: ['text'] as Array, + output: ['text'] as Array, + }, + context_window: 0, + }) + ).toThrow('context_window') + }) + + it('should throw for empty input modalities', () => { + expect(() => + defineModelMeta({ + name: 'test', + supports: { + input: [] as Array, + output: ['text'] as Array, + }, + }) + ).toThrow('input') + }) +}) diff --git a/packages/typescript/ai-utils/tests/transforms.test.ts 
b/packages/typescript/ai-utils/tests/transforms.test.ts new file mode 100644 index 000000000..d37ba2e80 --- /dev/null +++ b/packages/typescript/ai-utils/tests/transforms.test.ts @@ -0,0 +1,48 @@ +import { describe, it, expect } from 'vitest' +import { transformNullsToUndefined } from '../src/transforms' + +describe('transformNullsToUndefined', () => { + it('should convert null values to undefined', () => { + const result = transformNullsToUndefined({ a: null, b: 'hello' }) + expect(result).toEqual({ b: 'hello' }) + expect('a' in result).toBe(false) + }) + + it('should handle nested objects', () => { + const result = transformNullsToUndefined({ + a: { b: null, c: 'value' }, + d: null, + }) + expect(result).toEqual({ a: { c: 'value' } }) + }) + + it('should handle arrays', () => { + const result = transformNullsToUndefined({ + items: [{ a: null, b: 1 }, { a: 'x', b: null }], + }) + expect(result).toEqual({ + items: [{ b: 1 }, { a: 'x' }], + }) + }) + + it('should return non-objects unchanged', () => { + expect(transformNullsToUndefined('hello')).toBe('hello') + expect(transformNullsToUndefined(42)).toBe(42) + expect(transformNullsToUndefined(true)).toBe(true) + }) + + it('should return null as undefined', () => { + expect(transformNullsToUndefined(null)).toBeUndefined() + }) + + it('should handle empty objects', () => { + expect(transformNullsToUndefined({})).toEqual({}) + }) + + it('should handle deeply nested nulls', () => { + const result = transformNullsToUndefined({ + a: { b: { c: { d: null, e: 'keep' } } }, + }) + expect(result).toEqual({ a: { b: { c: { e: 'keep' } } } }) + }) +}) diff --git a/packages/typescript/ai-utils/tsconfig.json b/packages/typescript/ai-utils/tsconfig.json new file mode 100644 index 000000000..ea11c1096 --- /dev/null +++ b/packages/typescript/ai-utils/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src" + }, + "include": ["src/**/*.ts", "src/**/*.tsx"], + 
"exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/ai-utils/vite.config.ts b/packages/typescript/ai-utils/vite.config.ts new file mode 100644 index 000000000..77bcc2e60 --- /dev/null +++ b/packages/typescript/ai-utils/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0b349d6c9..471bf784d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1120,6 +1120,15 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-utils: + devDependencies: + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-vue: dependencies: '@tanstack/ai-client': From fd0a9e5c2192c5c9935e1681d5785a99b7f41de2 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 13:06:57 +0200 Subject: [PATCH 02/15] fix(ai-utils): align with canonical adapter patterns MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix getApiKeyFromEnv to check globalThis.window.env before process.env, matching all existing adapters - Fix generateId to use substring(7) instead of substring(2,10) for consistent random part length - Add JSDoc to transformNullsToUndefined explaining the null→undefined JSON Schema reason - Add missing test cases for empty output modalities and negative output pricing --- packages/typescript/ai-utils/src/env.ts | 19 +++++-------- packages/typescript/ai-utils/src/id.ts | 2 +- .../typescript/ai-utils/src/transforms.ts | 6 ++++ .../ai-utils/tests/model-meta.test.ts | 28 +++++++++++++++++++ 4 files changed, 42 insertions(+), 13 deletions(-) diff --git a/packages/typescript/ai-utils/src/env.ts b/packages/typescript/ai-utils/src/env.ts index 23cb9bae2..483ca6ba6 100644 --- a/packages/typescript/ai-utils/src/env.ts +++ b/packages/typescript/ai-utils/src/env.ts @@ -1,17 +1,12 @@ export function getApiKeyFromEnv(envVarName: string): string { - let apiKey: string | undefined + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? 
process.env + : undefined - if (typeof process !== 'undefined' && process.env) { - apiKey = process.env[envVarName] - } - - if ( - !apiKey && - typeof window !== 'undefined' && - (window as any).env - ) { - apiKey = (window as any).env[envVarName] - } + const apiKey = env?.[envVarName] if (!apiKey) { throw new Error( diff --git a/packages/typescript/ai-utils/src/id.ts b/packages/typescript/ai-utils/src/id.ts index 03ace4128..165e4dd0a 100644 --- a/packages/typescript/ai-utils/src/id.ts +++ b/packages/typescript/ai-utils/src/id.ts @@ -1,5 +1,5 @@ export function generateId(prefix: string): string { const timestamp = Date.now() - const randomPart = Math.random().toString(36).substring(2, 10) + const randomPart = Math.random().toString(36).substring(7) return `${prefix}-${timestamp}-${randomPart}` } diff --git a/packages/typescript/ai-utils/src/transforms.ts b/packages/typescript/ai-utils/src/transforms.ts index 1496259c5..ca54a0e08 100644 --- a/packages/typescript/ai-utils/src/transforms.ts +++ b/packages/typescript/ai-utils/src/transforms.ts @@ -1,3 +1,9 @@ +/** + * Recursively converts null values to undefined in an object. + * Used after receiving structured output from OpenAI-compatible providers, + * which return null for optional fields that were made nullable in the + * JSON Schema strict mode transformation. 
+ */ export function transformNullsToUndefined(obj: T): T { if (obj === null) { return undefined as unknown as T diff --git a/packages/typescript/ai-utils/tests/model-meta.test.ts b/packages/typescript/ai-utils/tests/model-meta.test.ts index 207cc863c..94e9cf219 100644 --- a/packages/typescript/ai-utils/tests/model-meta.test.ts +++ b/packages/typescript/ai-utils/tests/model-meta.test.ts @@ -75,4 +75,32 @@ describe('defineModelMeta', () => { }) ).toThrow('input') }) + + it('should throw for empty output modalities', () => { + expect(() => + defineModelMeta({ + name: 'test', + supports: { + input: ['text'] as Array, + output: [] as Array, + }, + }) + ).toThrow('output') + }) + + it('should throw for negative output pricing', () => { + expect(() => + defineModelMeta({ + name: 'test', + supports: { + input: ['text'] as Array, + output: ['text'] as Array, + }, + pricing: { + input: { normal: 1 }, + output: { normal: -1 }, + }, + }) + ).toThrow('pricing') + }) }) From 296fc38a67032dd7e6ea08a989802690ae8f2f28 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 13:21:14 +0200 Subject: [PATCH 03/15] feat(openai-base): add @tanstack/openai-base with schema converter, tools, and types --- packages/typescript/openai-base/package.json | 47 +++++ packages/typescript/openai-base/src/index.ts | 6 + .../openai-base/src/tools/apply-patch-tool.ts | 26 +++ .../src/tools/code-interpreter-tool.ts | 31 ++++ .../src/tools/computer-use-tool.ts | 31 ++++ .../openai-base/src/tools/custom-tool.ts | 30 ++++ .../openai-base/src/tools/file-search-tool.ts | 42 +++++ .../openai-base/src/tools/function-tool.ts | 39 +++++ .../src/tools/image-generation-tool.ts | 39 +++++ .../typescript/openai-base/src/tools/index.ts | 41 +++++ .../openai-base/src/tools/local-shell-tool.ts | 26 +++ .../openai-base/src/tools/mcp-tool.ts | 41 +++++ .../openai-base/src/tools/shell-tool.ts | 24 +++ .../openai-base/src/tools/tool-choice.ts | 31 ++++ .../openai-base/src/tools/tool-converter.ts | 68 ++++++++ 
.../src/tools/web-search-preview-tool.ts | 29 ++++ .../openai-base/src/tools/web-search-tool.ts | 23 +++ .../openai-base/src/types/config.ts | 6 + .../openai-base/src/types/message-metadata.ts | 19 +++ .../openai-base/src/types/provider-options.ts | 37 ++++ .../openai-base/src/utils/client.ts | 6 + .../openai-base/src/utils/schema-converter.ts | 89 ++++++++++ .../tests/schema-converter.test.ts | 161 ++++++++++++++++++ packages/typescript/openai-base/tsconfig.json | 9 + .../typescript/openai-base/vite.config.ts | 36 ++++ pnpm-lock.yaml | 27 +++ 26 files changed, 964 insertions(+) create mode 100644 packages/typescript/openai-base/package.json create mode 100644 packages/typescript/openai-base/src/index.ts create mode 100644 packages/typescript/openai-base/src/tools/apply-patch-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/code-interpreter-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/computer-use-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/custom-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/file-search-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/function-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/image-generation-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/index.ts create mode 100644 packages/typescript/openai-base/src/tools/local-shell-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/mcp-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/shell-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/tool-choice.ts create mode 100644 packages/typescript/openai-base/src/tools/tool-converter.ts create mode 100644 packages/typescript/openai-base/src/tools/web-search-preview-tool.ts create mode 100644 packages/typescript/openai-base/src/tools/web-search-tool.ts create mode 100644 packages/typescript/openai-base/src/types/config.ts create mode 100644 
packages/typescript/openai-base/src/types/message-metadata.ts create mode 100644 packages/typescript/openai-base/src/types/provider-options.ts create mode 100644 packages/typescript/openai-base/src/utils/client.ts create mode 100644 packages/typescript/openai-base/src/utils/schema-converter.ts create mode 100644 packages/typescript/openai-base/tests/schema-converter.test.ts create mode 100644 packages/typescript/openai-base/tsconfig.json create mode 100644 packages/typescript/openai-base/vite.config.ts diff --git a/packages/typescript/openai-base/package.json b/packages/typescript/openai-base/package.json new file mode 100644 index 000000000..fcc2f3aea --- /dev/null +++ b/packages/typescript/openai-base/package.json @@ -0,0 +1,47 @@ +{ + "name": "@tanstack/openai-base", + "version": "0.1.0", + "description": "Shared base adapters and utilities for OpenAI-compatible providers in TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/openai-base" + }, + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": ["dist", "src"], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": ["ai", "openai", "tanstack", "adapter", "base"], + "dependencies": { + "@tanstack/ai-utils": "workspace:*", + "openai": "^6.9.1" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + }, + "devDependencies": { + "@tanstack/ai": "workspace:*", + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7", + "zod": "^4.2.0" + } +} diff --git 
a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts new file mode 100644 index 000000000..d09f34476 --- /dev/null +++ b/packages/typescript/openai-base/src/index.ts @@ -0,0 +1,6 @@ +export { makeStructuredOutputCompatible } from './utils/schema-converter' +export { createOpenAICompatibleClient } from './utils/client' +export type { OpenAICompatibleClientConfig } from './types/config' +export * from './tools/index' +export * from './types/message-metadata' +export * from './types/provider-options' diff --git a/packages/typescript/openai-base/src/tools/apply-patch-tool.ts b/packages/typescript/openai-base/src/tools/apply-patch-tool.ts new file mode 100644 index 000000000..8e73cc898 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/apply-patch-tool.ts @@ -0,0 +1,26 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type ApplyPatchTool = OpenAI.Responses.ApplyPatchTool + +/** + * Converts a standard Tool to OpenAI ApplyPatchTool format + */ +export function convertApplyPatchToolToAdapterFormat( + _tool: Tool, +): ApplyPatchTool { + return { + type: 'apply_patch', + } +} + +/** + * Creates a standard Tool from ApplyPatchTool parameters + */ +export function applyPatchTool(): Tool { + return { + name: 'apply_patch', + description: 'Apply a patch to modify files', + metadata: {}, + } +} diff --git a/packages/typescript/openai-base/src/tools/code-interpreter-tool.ts b/packages/typescript/openai-base/src/tools/code-interpreter-tool.ts new file mode 100644 index 000000000..15bd8e429 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/code-interpreter-tool.ts @@ -0,0 +1,31 @@ +import type { Tool } from '@tanstack/ai' +import type OpenAI from 'openai' + +export type CodeInterpreterTool = OpenAI.Responses.Tool.CodeInterpreter + +/** + * Converts a standard Tool to OpenAI CodeInterpreterTool format + */ +export function convertCodeInterpreterToolToAdapterFormat( + tool: Tool, 
+): CodeInterpreterTool { + const metadata = tool.metadata as CodeInterpreterTool + return { + type: 'code_interpreter', + container: metadata.container, + } +} + +/** + * Creates a standard Tool from CodeInterpreterTool parameters + */ +export function codeInterpreterTool(container: CodeInterpreterTool): Tool { + return { + name: 'code_interpreter', + description: 'Execute code in a sandboxed environment', + metadata: { + type: 'code_interpreter', + container, + }, + } +} diff --git a/packages/typescript/openai-base/src/tools/computer-use-tool.ts b/packages/typescript/openai-base/src/tools/computer-use-tool.ts new file mode 100644 index 000000000..1a19b573b --- /dev/null +++ b/packages/typescript/openai-base/src/tools/computer-use-tool.ts @@ -0,0 +1,31 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type ComputerUseTool = OpenAI.Responses.ComputerTool +/** + * Converts a standard Tool to OpenAI ComputerUseTool format + */ +export function convertComputerUseToolToAdapterFormat( + tool: Tool, +): ComputerUseTool { + const metadata = tool.metadata as ComputerUseTool + return { + type: 'computer_use_preview', + display_height: metadata.display_height, + display_width: metadata.display_width, + environment: metadata.environment, + } +} + +/** + * Creates a standard Tool from ComputerUseTool parameters + */ +export function computerUseTool(toolData: ComputerUseTool): Tool { + return { + name: 'computer_use_preview', + description: 'Control a virtual computer', + metadata: { + ...toolData, + }, + } +} diff --git a/packages/typescript/openai-base/src/tools/custom-tool.ts b/packages/typescript/openai-base/src/tools/custom-tool.ts new file mode 100644 index 000000000..ad7de4d25 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/custom-tool.ts @@ -0,0 +1,30 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type CustomTool = OpenAI.Responses.CustomTool + +/** + * Converts a standard 
Tool to OpenAI CustomTool format + */ +export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { + const metadata = tool.metadata as CustomTool + return { + type: 'custom', + name: metadata.name, + description: metadata.description, + format: metadata.format, + } +} + +/** + * Creates a standard Tool from CustomTool parameters + */ +export function customTool(toolData: CustomTool): Tool { + return { + name: 'custom', + description: toolData.description || 'A custom tool', + metadata: { + ...toolData, + }, + } +} diff --git a/packages/typescript/openai-base/src/tools/file-search-tool.ts b/packages/typescript/openai-base/src/tools/file-search-tool.ts new file mode 100644 index 000000000..0fc85f06e --- /dev/null +++ b/packages/typescript/openai-base/src/tools/file-search-tool.ts @@ -0,0 +1,42 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +const validateMaxNumResults = (maxNumResults: number | undefined) => { + if (maxNumResults && (maxNumResults < 1 || maxNumResults > 50)) { + throw new Error('max_num_results must be between 1 and 50.') + } +} + +export type FileSearchTool = OpenAI.Responses.FileSearchTool + +/** + * Converts a standard Tool to OpenAI FileSearchTool format + */ +export function convertFileSearchToolToAdapterFormat( + tool: Tool, +): OpenAI.Responses.FileSearchTool { + const metadata = tool.metadata as OpenAI.Responses.FileSearchTool + return { + type: 'file_search', + vector_store_ids: metadata.vector_store_ids, + max_num_results: metadata.max_num_results, + ranking_options: metadata.ranking_options, + filters: metadata.filters, + } +} + +/** + * Creates a standard Tool from FileSearchTool parameters + */ +export function fileSearchTool( + toolData: OpenAI.Responses.FileSearchTool, +): Tool { + validateMaxNumResults(toolData.max_num_results) + return { + name: 'file_search', + description: 'Search files in vector stores', + metadata: { + ...toolData, + }, + } +} diff --git 
a/packages/typescript/openai-base/src/tools/function-tool.ts b/packages/typescript/openai-base/src/tools/function-tool.ts new file mode 100644 index 000000000..7ae905cc2 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/function-tool.ts @@ -0,0 +1,39 @@ +import { makeStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type OpenAI from 'openai' + +export type FunctionTool = OpenAI.Responses.FunctionTool + +/** + * Converts a standard Tool to OpenAI FunctionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply OpenAI-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + * + * This enables strict mode for all tools automatically. + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + const inputSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const jsonSchema = makeStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + jsonSchema.additionalProperties = false + + return { + type: 'function', + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + } satisfies FunctionTool +} diff --git a/packages/typescript/openai-base/src/tools/image-generation-tool.ts b/packages/typescript/openai-base/src/tools/image-generation-tool.ts new file mode 100644 index 000000000..c48ff1e0e --- /dev/null +++ b/packages/typescript/openai-base/src/tools/image-generation-tool.ts @@ -0,0 +1,39 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type ImageGenerationTool = OpenAI.Responses.Tool.ImageGeneration + +const validatePartialImages = (value: number | undefined) => { + if (value !== undefined && (value < 0 || value > 3)) { + throw new Error('partial_images must be between 0 
and 3') + } +} + +/** + * Converts a standard Tool to OpenAI ImageGenerationTool format + */ +export function convertImageGenerationToolToAdapterFormat( + tool: Tool, +): ImageGenerationTool { + const metadata = tool.metadata as Omit + return { + type: 'image_generation', + ...metadata, + } +} + +/** + * Creates a standard Tool from ImageGenerationTool parameters + */ +export function imageGenerationTool( + toolData: Omit, +): Tool { + validatePartialImages(toolData.partial_images) + return { + name: 'image_generation', + description: 'Generate images based on text descriptions', + metadata: { + ...toolData, + }, + } +} diff --git a/packages/typescript/openai-base/src/tools/index.ts b/packages/typescript/openai-base/src/tools/index.ts new file mode 100644 index 000000000..1795d7fce --- /dev/null +++ b/packages/typescript/openai-base/src/tools/index.ts @@ -0,0 +1,41 @@ +import type { ApplyPatchTool } from './apply-patch-tool' +import type { CodeInterpreterTool } from './code-interpreter-tool' +import type { ComputerUseTool } from './computer-use-tool' +import type { CustomTool } from './custom-tool' +import type { FileSearchTool } from './file-search-tool' +import type { FunctionTool } from './function-tool' +import type { ImageGenerationTool } from './image-generation-tool' +import type { LocalShellTool } from './local-shell-tool' +import type { MCPTool } from './mcp-tool' +import type { ShellTool } from './shell-tool' +import type { WebSearchPreviewTool } from './web-search-preview-tool' +import type { WebSearchTool } from './web-search-tool' + +export type OpenAITool = + | ApplyPatchTool + | CodeInterpreterTool + | ComputerUseTool + | CustomTool + | FileSearchTool + | FunctionTool + | ImageGenerationTool + | LocalShellTool + | MCPTool + | ShellTool + | WebSearchPreviewTool + | WebSearchTool + +export * from './apply-patch-tool' +export * from './code-interpreter-tool' +export * from './computer-use-tool' +export * from './custom-tool' +export * from 
'./file-search-tool' +export * from './function-tool' +export * from './image-generation-tool' +export * from './local-shell-tool' +export * from './mcp-tool' +export * from './shell-tool' +export * from './tool-choice' +export * from './tool-converter' +export * from './web-search-preview-tool' +export * from './web-search-tool' diff --git a/packages/typescript/openai-base/src/tools/local-shell-tool.ts b/packages/typescript/openai-base/src/tools/local-shell-tool.ts new file mode 100644 index 000000000..ed829cb28 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/local-shell-tool.ts @@ -0,0 +1,26 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type LocalShellTool = OpenAI.Responses.Tool.LocalShell + +/** + * Converts a standard Tool to OpenAI LocalShellTool format + */ +export function convertLocalShellToolToAdapterFormat( + _tool: Tool, +): LocalShellTool { + return { + type: 'local_shell', + } +} + +/** + * Creates a standard Tool from LocalShellTool parameters + */ +export function localShellTool(): Tool { + return { + name: 'local_shell', + description: 'Execute local shell commands', + metadata: {}, + } +} diff --git a/packages/typescript/openai-base/src/tools/mcp-tool.ts b/packages/typescript/openai-base/src/tools/mcp-tool.ts new file mode 100644 index 000000000..64b94357f --- /dev/null +++ b/packages/typescript/openai-base/src/tools/mcp-tool.ts @@ -0,0 +1,41 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type MCPTool = OpenAI.Responses.Tool.Mcp + +export function validateMCPtool(tool: MCPTool) { + if (!tool.server_url && !tool.connector_id) { + throw new Error('Either server_url or connector_id must be provided.') + } + if (tool.connector_id && tool.server_url) { + throw new Error('Only one of server_url or connector_id can be provided.') + } +} + +/** + * Converts a standard Tool to OpenAI MCPTool format + */ +export function convertMCPToolToAdapterFormat(tool: 
Tool): MCPTool { + const metadata = tool.metadata as Omit + + const mcpTool: MCPTool = { + type: 'mcp', + ...metadata, + } + + validateMCPtool(mcpTool) + return mcpTool +} + +/** + * Creates a standard Tool from MCPTool parameters + */ +export function mcpTool(toolData: Omit): Tool { + validateMCPtool({ ...toolData, type: 'mcp' }) + + return { + name: 'mcp', + description: toolData.server_description || '', + metadata: toolData, + } +} diff --git a/packages/typescript/openai-base/src/tools/shell-tool.ts b/packages/typescript/openai-base/src/tools/shell-tool.ts new file mode 100644 index 000000000..83b301a23 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/shell-tool.ts @@ -0,0 +1,24 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type ShellTool = OpenAI.Responses.FunctionShellTool + +/** + * Converts a standard Tool to OpenAI ShellTool format + */ +export function convertShellToolToAdapterFormat(_tool: Tool): ShellTool { + return { + type: 'shell', + } +} + +/** + * Creates a standard Tool from ShellTool parameters + */ +export function shellTool(): Tool { + return { + name: 'shell', + description: 'Execute shell commands', + metadata: {}, + } +} diff --git a/packages/typescript/openai-base/src/tools/tool-choice.ts b/packages/typescript/openai-base/src/tools/tool-choice.ts new file mode 100644 index 000000000..db6e0b148 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/tool-choice.ts @@ -0,0 +1,31 @@ +interface MCPToolChoice { + type: 'mcp' + server_label: 'deepwiki' +} + +interface FunctionToolChoice { + type: 'function' + name: string +} + +interface CustomToolChoice { + type: 'custom' + name: string +} + +interface HostedToolChoice { + type: + | 'file_search' + | 'web_search_preview' + | 'computer_use_preview' + | 'code_interpreter' + | 'image_generation' + | 'shell' + | 'apply_patch' +} + +export type ToolChoice = + | MCPToolChoice + | FunctionToolChoice + | CustomToolChoice + | HostedToolChoice 
diff --git a/packages/typescript/openai-base/src/tools/tool-converter.ts b/packages/typescript/openai-base/src/tools/tool-converter.ts new file mode 100644 index 000000000..2855cd3f0 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/tool-converter.ts @@ -0,0 +1,68 @@ +import { convertApplyPatchToolToAdapterFormat } from './apply-patch-tool' +import { convertCodeInterpreterToolToAdapterFormat } from './code-interpreter-tool' +import { convertComputerUseToolToAdapterFormat } from './computer-use-tool' +import { convertCustomToolToAdapterFormat } from './custom-tool' +import { convertFileSearchToolToAdapterFormat } from './file-search-tool' +import { convertFunctionToolToAdapterFormat } from './function-tool' +import { convertImageGenerationToolToAdapterFormat } from './image-generation-tool' +import { convertLocalShellToolToAdapterFormat } from './local-shell-tool' +import { convertMCPToolToAdapterFormat } from './mcp-tool' +import { convertShellToolToAdapterFormat } from './shell-tool' +import { convertWebSearchPreviewToolToAdapterFormat } from './web-search-preview-tool' +import { convertWebSearchToolToAdapterFormat } from './web-search-tool' +import type { OpenAITool } from './index' +import type { Tool } from '@tanstack/ai' + +const SPECIAL_TOOL_NAMES = new Set([ + 'apply_patch', + 'code_interpreter', + 'computer_use_preview', + 'file_search', + 'image_generation', + 'local_shell', + 'mcp', + 'shell', + 'web_search_preview', + 'web_search', + 'custom', +]) + +/** + * Converts an array of standard Tools to OpenAI-specific format + */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + const toolName = tool.name + + if (SPECIAL_TOOL_NAMES.has(toolName)) { + switch (toolName) { + case 'apply_patch': + return convertApplyPatchToolToAdapterFormat(tool) + case 'code_interpreter': + return convertCodeInterpreterToolToAdapterFormat(tool) + case 'computer_use_preview': + return 
convertComputerUseToolToAdapterFormat(tool) + case 'file_search': + return convertFileSearchToolToAdapterFormat(tool) + case 'image_generation': + return convertImageGenerationToolToAdapterFormat(tool) + case 'local_shell': + return convertLocalShellToolToAdapterFormat(tool) + case 'mcp': + return convertMCPToolToAdapterFormat(tool) + case 'shell': + return convertShellToolToAdapterFormat(tool) + case 'web_search_preview': + return convertWebSearchPreviewToolToAdapterFormat(tool) + case 'web_search': + return convertWebSearchToolToAdapterFormat(tool) + case 'custom': + return convertCustomToolToAdapterFormat(tool) + } + } + + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/openai-base/src/tools/web-search-preview-tool.ts b/packages/typescript/openai-base/src/tools/web-search-preview-tool.ts new file mode 100644 index 000000000..48942d436 --- /dev/null +++ b/packages/typescript/openai-base/src/tools/web-search-preview-tool.ts @@ -0,0 +1,29 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type WebSearchPreviewTool = OpenAI.Responses.WebSearchPreviewTool + +/** + * Converts a standard Tool to OpenAI WebSearchPreviewTool format + */ +export function convertWebSearchPreviewToolToAdapterFormat( + tool: Tool, +): WebSearchPreviewTool { + const metadata = tool.metadata as WebSearchPreviewTool + return { + type: metadata.type, + search_context_size: metadata.search_context_size, + user_location: metadata.user_location, + } +} + +/** + * Creates a standard Tool from WebSearchPreviewTool parameters + */ +export function webSearchPreviewTool(toolData: WebSearchPreviewTool): Tool { + return { + name: 'web_search_preview', + description: 'Search the web (preview version)', + metadata: toolData, + } +} diff --git a/packages/typescript/openai-base/src/tools/web-search-tool.ts b/packages/typescript/openai-base/src/tools/web-search-tool.ts new file mode 100644 index 000000000..c7d5aef68 --- /dev/null 
+++ b/packages/typescript/openai-base/src/tools/web-search-tool.ts @@ -0,0 +1,23 @@ +import type OpenAI from 'openai' +import type { Tool } from '@tanstack/ai' + +export type WebSearchTool = OpenAI.Responses.WebSearchTool + +/** + * Converts a standard Tool to OpenAI WebSearchTool format + */ +export function convertWebSearchToolToAdapterFormat(tool: Tool): WebSearchTool { + const metadata = tool.metadata as WebSearchTool + return metadata +} + +/** + * Creates a standard Tool from WebSearchTool parameters + */ +export function webSearchTool(toolData: WebSearchTool): Tool { + return { + name: 'web_search', + description: 'Search the web', + metadata: toolData, + } +} diff --git a/packages/typescript/openai-base/src/types/config.ts b/packages/typescript/openai-base/src/types/config.ts new file mode 100644 index 000000000..e925143a6 --- /dev/null +++ b/packages/typescript/openai-base/src/types/config.ts @@ -0,0 +1,6 @@ +import type { ClientOptions } from 'openai' + +export interface OpenAICompatibleClientConfig extends ClientOptions { + apiKey: string + baseURL?: string +} diff --git a/packages/typescript/openai-base/src/types/message-metadata.ts b/packages/typescript/openai-base/src/types/message-metadata.ts new file mode 100644 index 000000000..e5179c9b1 --- /dev/null +++ b/packages/typescript/openai-base/src/types/message-metadata.ts @@ -0,0 +1,19 @@ +export interface OpenAICompatibleImageMetadata { + detail?: 'auto' | 'low' | 'high' +} + +export interface OpenAICompatibleAudioMetadata { + format?: 'mp3' | 'wav' | 'flac' | 'ogg' | 'webm' | 'aac' +} + +export interface OpenAICompatibleVideoMetadata {} +export interface OpenAICompatibleDocumentMetadata {} +export interface OpenAICompatibleTextMetadata {} + +export interface OpenAICompatibleMessageMetadataByModality { + text: OpenAICompatibleTextMetadata + image: OpenAICompatibleImageMetadata + audio: OpenAICompatibleAudioMetadata + video: OpenAICompatibleVideoMetadata + document: OpenAICompatibleDocumentMetadata +} 
diff --git a/packages/typescript/openai-base/src/types/provider-options.ts b/packages/typescript/openai-base/src/types/provider-options.ts new file mode 100644 index 000000000..9bef2598c --- /dev/null +++ b/packages/typescript/openai-base/src/types/provider-options.ts @@ -0,0 +1,37 @@ +export interface OpenAICompatibleBaseOptions { + temperature?: number + top_p?: number + max_tokens?: number + frequency_penalty?: number + presence_penalty?: number + stop?: string | string[] + user?: string +} + +export interface OpenAICompatibleReasoningOptions { + reasoning?: { + effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' + summary?: 'auto' | 'detailed' + } +} + +export interface OpenAICompatibleStructuredOutputOptions { + text?: { + format: { + type: string + name?: string + schema?: Record + strict?: boolean + } + } +} + +export interface OpenAICompatibleToolsOptions { + max_tool_calls?: number + parallel_tool_calls?: boolean + tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string } } +} + +export interface OpenAICompatibleStreamingOptions { + stream_options?: { include_usage?: boolean } +} diff --git a/packages/typescript/openai-base/src/utils/client.ts b/packages/typescript/openai-base/src/utils/client.ts new file mode 100644 index 000000000..3d33148ba --- /dev/null +++ b/packages/typescript/openai-base/src/utils/client.ts @@ -0,0 +1,6 @@ +import OpenAI from 'openai' +import type { OpenAICompatibleClientConfig } from '../types/config' + +export function createOpenAICompatibleClient(config: OpenAICompatibleClientConfig): OpenAI { + return new OpenAI(config) +} diff --git a/packages/typescript/openai-base/src/utils/schema-converter.ts b/packages/typescript/openai-base/src/utils/schema-converter.ts new file mode 100644 index 000000000..83bdd06f8 --- /dev/null +++ b/packages/typescript/openai-base/src/utils/schema-converter.ts @@ -0,0 +1,89 @@ +/** + * Transform a JSON schema to be compatible with OpenAI's structured output 
requirements. + * OpenAI requires: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + * + * @param schema - JSON schema to transform + * @param originalRequired - Original required array (to know which fields were optional) + * @returns Transformed schema compatible with OpenAI structured output + */ +export function makeStructuredOutputCompatible( + schema: Record, + originalRequired: Array = [], +): Record { + const result = { ...schema } + + if (result.type === 'object' && result.properties) { + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + for (const propName of allPropertyNames) { + const prop = properties[propName] + const wasOptional = !originalRequired.includes(propName) + + if (prop.type === 'object' && prop.properties) { + properties[propName] = makeStructuredOutputCompatible( + prop, + prop.required || [], + ) + } else if (prop.type === 'array' && prop.items) { + properties[propName] = { + ...prop, + items: makeStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + } else if (prop.anyOf) { + properties[propName] = makeStructuredOutputCompatible( + prop, + prop.required || [], + ) + } else if (prop.oneOf) { + throw new Error( + 'oneOf is not supported in OpenAI structured output schemas. 
Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', + ) + } else if (wasOptional) { + // Optional fields must be nullable because OpenAI requires all properties in `required` + if (prop.type && !Array.isArray(prop.type)) { + properties[propName] = { + ...prop, + type: [prop.type, 'null'], + } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + properties[propName] = { + ...prop, + type: [...prop.type, 'null'], + } + } + } + } + + result.properties = properties + result.required = allPropertyNames + result.additionalProperties = false + } + + if (result.type === 'array' && result.items) { + result.items = makeStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + if (result.anyOf && Array.isArray(result.anyOf)) { + result.anyOf = result.anyOf.map((variant) => + makeStructuredOutputCompatible(variant, variant.required || []), + ) + } + + if (result.oneOf) { + throw new Error( + 'oneOf is not supported in OpenAI structured output schemas. 
Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', + ) + } + + return result +} diff --git a/packages/typescript/openai-base/tests/schema-converter.test.ts b/packages/typescript/openai-base/tests/schema-converter.test.ts new file mode 100644 index 000000000..f90f54493 --- /dev/null +++ b/packages/typescript/openai-base/tests/schema-converter.test.ts @@ -0,0 +1,161 @@ +import { describe, expect, it } from 'vitest' +import { makeStructuredOutputCompatible } from '../src/utils/schema-converter' + +describe('makeStructuredOutputCompatible', () => { + it('should add additionalProperties: false to object schemas', () => { + const schema = { + type: 'object', + properties: { + name: { type: 'string' }, + }, + required: ['name'], + } + + const result = makeStructuredOutputCompatible(schema, ['name']) + expect(result.additionalProperties).toBe(false) + }) + + it('should make all properties required', () => { + const schema = { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name'], + } + + const result = makeStructuredOutputCompatible(schema, ['name']) + expect(result.required).toEqual(['name', 'age']) + }) + + it('should make optional fields nullable', () => { + const schema = { + type: 'object', + properties: { + name: { type: 'string' }, + nickname: { type: 'string' }, + }, + required: ['name'], + } + + const result = makeStructuredOutputCompatible(schema, ['name']) + expect(result.properties.name.type).toBe('string') + expect(result.properties.nickname.type).toEqual(['string', 'null']) + }) + + it('should handle anyOf (union types) by transforming each variant', () => { + const schema = { + type: 'object', + properties: { + u: { + anyOf: [ + { + type: 'object', + properties: { a: { type: 'string' } }, + required: ['a'], + }, + { + type: 'object', + properties: { b: { type: 'number' } }, + required: ['b'], + }, + ], + }, + }, + required: ['u'], + } + + 
const result = makeStructuredOutputCompatible(schema, ['u']) + + // Each variant in anyOf should have additionalProperties: false + expect(result.properties.u.anyOf[0].additionalProperties).toBe(false) + expect(result.properties.u.anyOf[1].additionalProperties).toBe(false) + + // Verify complete structure + expect(result.additionalProperties).toBe(false) + expect(result.required).toEqual(['u']) + expect(result.properties.u.anyOf).toHaveLength(2) + expect(result.properties.u.anyOf[0].required).toEqual(['a']) + expect(result.properties.u.anyOf[1].required).toEqual(['b']) + }) + + it('should handle nested objects inside anyOf', () => { + const schema = { + type: 'object', + properties: { + data: { + anyOf: [ + { + type: 'object', + properties: { + nested: { + type: 'object', + properties: { x: { type: 'string' } }, + required: ['x'], + }, + }, + required: ['nested'], + }, + ], + }, + }, + required: ['data'], + } + + const result = makeStructuredOutputCompatible(schema, ['data']) + + // The nested object inside anyOf variant should also have additionalProperties: false + expect(result.properties.data.anyOf[0].additionalProperties).toBe(false) + expect( + result.properties.data.anyOf[0].properties.nested.additionalProperties, + ).toBe(false) + }) + + it('should handle arrays with items', () => { + const schema = { + type: 'object', + properties: { + items: { + type: 'array', + items: { + type: 'object', + properties: { id: { type: 'string' } }, + required: ['id'], + }, + }, + }, + required: ['items'], + } + + const result = makeStructuredOutputCompatible(schema, ['items']) + expect(result.properties.items.items.additionalProperties).toBe(false) + }) + + it('should throw an error for oneOf schemas (not supported by OpenAI)', () => { + const schema = { + type: 'object', + properties: { + u: { + oneOf: [ + { + type: 'object', + properties: { type: { const: 'a' }, value: { type: 'string' } }, + required: ['type', 'value'], + }, + { + type: 'object', + properties: { type: { 
const: 'b' }, count: { type: 'number' } }, + required: ['type', 'count'], + }, + ], + }, + }, + required: ['u'], + } + + expect(() => makeStructuredOutputCompatible(schema, ['u'])).toThrow( + 'oneOf is not supported in OpenAI structured output schemas', + ) + }) +}) diff --git a/packages/typescript/openai-base/tsconfig.json b/packages/typescript/openai-base/tsconfig.json new file mode 100644 index 000000000..ea11c1096 --- /dev/null +++ b/packages/typescript/openai-base/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src" + }, + "include": ["src/**/*.ts", "src/**/*.tsx"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/openai-base/vite.config.ts b/packages/typescript/openai-base/vite.config.ts new file mode 100644 index 000000000..77bcc2e60 --- /dev/null +++ b/packages/typescript/openai-base/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 471bf784d..518ae9e84 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1200,6 +1200,28 @@ importers: specifier: ^2.2.10 version: 2.2.12(typescript@5.9.3) + packages/typescript/openai-base: + dependencies: + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils 
+ openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.18.3)(zod@4.3.6) + devDependencies: + '@tanstack/ai': + specifier: workspace:* + version: link:../ai + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + zod: + specifier: ^4.2.0 + version: 4.3.6 + packages/typescript/preact-ai-devtools: dependencies: '@tanstack/ai-devtools-core': @@ -17148,6 +17170,11 @@ snapshots: ws: 8.18.3 zod: 4.2.1 + openai@6.10.0(ws@8.18.3)(zod@4.3.6): + optionalDependencies: + ws: 8.18.3 + zod: 4.3.6 + optionator@0.9.4: dependencies: deep-is: 0.1.4 From dc6fc37492e3b9d23d274e6dde537f4b8e544890 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 13:32:48 +0200 Subject: [PATCH 04/15] feat(openai-base): add Chat Completions text adapter base class Extract the streaming Chat Completions pipeline from ai-grok into a reusable OpenAICompatibleChatCompletionsTextAdapter. Providers that use the OpenAI Chat Completions API (/v1/chat/completions) can extend this class and only need to set baseURL, lock type parameters, and override methods for provider-specific quirks. Protected override points: mapOptionsToRequest, convertMessage, convertContentPart, processStreamChunks, makeStructuredOutputCompatible, normalizeContent, extractTextContent. Also adds Chat Completions-specific tool converter (distinct from the existing Responses API tool converter). 
--- .../src/adapters/chat-completions-text.ts | 591 ++++++++++++++ .../chat-completions-tool-converter.ts | 66 ++ packages/typescript/openai-base/src/index.ts | 6 + .../tests/chat-completions-text.test.ts | 748 ++++++++++++++++++ 4 files changed, 1411 insertions(+) create mode 100644 packages/typescript/openai-base/src/adapters/chat-completions-text.ts create mode 100644 packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts create mode 100644 packages/typescript/openai-base/tests/chat-completions-text.test.ts diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts new file mode 100644 index 000000000..ad3621cf3 --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -0,0 +1,591 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' +import { createOpenAICompatibleClient } from '../utils/client' +import { makeStructuredOutputCompatible } from '../utils/schema-converter' +import { convertToolsToChatCompletionsFormat } from './chat-completions-tool-converter' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type OpenAI_SDK from 'openai' +import type { + ContentPart, + DefaultMessageMetadataByModality, + Modality, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { OpenAICompatibleClientConfig } from '../types/config' + +/** + * OpenAI-compatible Chat Completions Text Adapter + * + * A generalized base class for providers that use the OpenAI Chat Completions API + * (`/v1/chat/completions`). 
Providers like Grok, Groq, OpenRouter, and others can + * extend this class and only need to: + * - Set `baseURL` in the config + * - Lock the generic type parameters to provider-specific types + * - Override specific methods for quirks + * + * All methods that build requests or process responses are `protected` so subclasses + * can override them. + */ +export class OpenAICompatibleChatCompletionsTextAdapter< + TModel extends string, + TProviderOptions extends Record = Record, + TInputModalities extends ReadonlyArray = ReadonlyArray, + TMessageMetadata extends + DefaultMessageMetadataByModality = DefaultMessageMetadataByModality, +> extends BaseTextAdapter< + TModel, + TProviderOptions, + TInputModalities, + TMessageMetadata +> { + readonly kind = 'text' as const + readonly name: string + + protected client: OpenAI_SDK + + constructor( + config: OpenAICompatibleClientConfig, + model: TModel, + name: string = 'openai-compatible', + ) { + super({}, model) + this.name = name + this.client = createOpenAICompatibleClient(config) + } + + async *chatStream( + options: TextOptions, + ): AsyncIterable { + const requestParams = this.mapOptionsToRequest(options) + const timestamp = Date.now() + + // AG-UI lifecycle tracking (mutable state object for ESLint compatibility) + const aguiState = { + runId: generateId(this.name), + messageId: generateId(this.name), + timestamp, + hasEmittedRunStarted: false, + } + + try { + const stream = await this.client.chat.completions.create({ + ...requestParams, + stream: true, + }) + + yield* this.processStreamChunks(stream, options, aguiState) + } catch (error: unknown) { + const err = error as Error & { code?: string } + + // Emit RUN_STARTED if not yet emitted + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: options.model, + timestamp, + } + } + + // Emit AG-UI RUN_ERROR + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: 
options.model, + timestamp, + error: { + message: err.message || 'Unknown error', + code: err.code, + }, + } + + console.error( + `>>> [${this.name}] chatStream: Fatal error during response creation <<<`, + ) + console.error('>>> Error message:', err.message) + console.error('>>> Error stack:', err.stack) + console.error('>>> Full error:', err) + } + } + + /** + * Generate structured output using the provider's JSON Schema response format. + * Uses stream: false to get the complete response in one call. + * + * OpenAI-compatible APIs have strict requirements for structured output: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for all objects + * + * The outputSchema is already JSON Schema (converted in the ai layer). + * We apply provider-specific transformations for structured output compatibility. + */ + async structuredOutput( + options: StructuredOutputOptions, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapOptionsToRequest(chatOptions) + + const jsonSchema = this.makeStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const response = await this.client.chat.completions.create({ + ...requestParams, + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }) + + // Extract text content from the response + const rawText = response.choices[0]?.message.content || '' + + // Parse the JSON response + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' 
: ''}`, + ) + } + + // Transform null values to undefined to match original Zod schema expectations + // Provider returns null for optional fields we made nullable in the schema + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + console.error( + `>>> [${this.name}] structuredOutput: Error during response creation <<<`, + ) + console.error('>>> Error message:', err.message) + throw error + } + } + + /** + * Applies provider-specific transformations for structured output compatibility. + * Override this in subclasses to handle provider-specific quirks. + */ + protected makeStructuredOutputCompatible( + schema: Record, + originalRequired: Array, + ): Record { + return makeStructuredOutputCompatible(schema, originalRequired) + } + + /** + * Processes streamed chunks from the Chat Completions API and yields AG-UI events. + * Override this in subclasses to handle provider-specific stream behavior. 
+ */ + protected async *processStreamChunks( + stream: AsyncIterable, + options: TextOptions, + aguiState: { + runId: string + messageId: string + timestamp: number + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = aguiState.timestamp + let hasEmittedTextMessageStart = false + + // Track tool calls being streamed (arguments come in chunks) + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + started: boolean // Track if TOOL_CALL_START has been emitted + } + >() + + try { + for await (const chunk of stream) { + const choice = chunk.choices[0] + + if (!choice) continue + + // Emit RUN_STARTED on first chunk + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + } + } + + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.tool_calls + + // Handle content delta + if (deltaContent) { + // Emit TEXT_MESSAGE_START on first text content + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + role: 'assistant', + } + } + + accumulatedContent += deltaContent + + // Emit AG-UI TEXT_MESSAGE_CONTENT + yield { + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + delta: deltaContent, + content: accumulatedContent, + } + } + + // Handle tool calls - they come in as deltas + if (deltaToolCalls) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + // Initialize or update the tool call in progress + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + 
arguments: '', + started: false, + }) + } + + const toolCall = toolCallsInProgress.get(index)! + + // Update with any new data from the delta + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function?.arguments) { + toolCall.arguments += toolCallDelta.function.arguments + } + + // Emit TOOL_CALL_START when we have id and name + if (toolCall.id && toolCall.name && !toolCall.started) { + toolCall.started = true + yield { + type: 'TOOL_CALL_START', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + index, + } + } + + // Emit TOOL_CALL_ARGS for argument deltas + if (toolCallDelta.function?.arguments && toolCall.started) { + yield { + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunk.model || options.model, + timestamp, + delta: toolCallDelta.function.arguments, + } + } + } + } + + // Handle finish reason + if (choice.finish_reason) { + // Emit all completed tool calls + if ( + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [, toolCall] of toolCallsInProgress) { + // Parse arguments for TOOL_CALL_END + let parsedInput: unknown = {} + try { + parsedInput = toolCall.arguments + ? JSON.parse(toolCall.arguments) + : {} + } catch { + parsedInput = {} + } + + // Emit AG-UI TOOL_CALL_END + yield { + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + input: parsedInput, + } + } + } + + const computedFinishReason = + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ? 
'tool_calls' + : 'stop' + + // Emit TEXT_MESSAGE_END if we had text content + if (hasEmittedTextMessageStart) { + yield { + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + } + } + + // Emit AG-UI RUN_FINISHED + yield { + type: 'RUN_FINISHED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + usage: chunk.usage + ? { + promptTokens: chunk.usage.prompt_tokens || 0, + completionTokens: chunk.usage.completion_tokens || 0, + totalTokens: chunk.usage.total_tokens || 0, + } + : undefined, + finishReason: computedFinishReason, + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log( + `[${this.name}] Stream ended with error:`, + err.message, + ) + + // Emit AG-UI RUN_ERROR + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Maps common TextOptions to Chat Completions API request format. + * Override this in subclasses to add provider-specific options. + */ + protected mapOptionsToRequest( + options: TextOptions, + ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming { + const tools = options.tools + ? 
convertToolsToChatCompletionsFormat( + options.tools, + this.makeStructuredOutputCompatible.bind(this), + ) + : undefined + + // Build messages array with system prompts + const messages: Array = + [] + + // Add system prompts first + if (options.systemPrompts && options.systemPrompts.length > 0) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + // Convert messages + for (const message of options.messages) { + messages.push(this.convertMessage(message)) + } + + return { + model: options.model, + messages, + temperature: options.temperature, + max_tokens: options.maxTokens, + top_p: options.topP, + tools: tools as Array, + stream: true, + stream_options: { include_usage: true }, + } + } + + /** + * Converts a single ModelMessage to the Chat Completions API message format. + * Override this in subclasses to handle provider-specific message formats. + */ + protected convertMessage( + message: ModelMessage, + ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam { + // Handle tool messages + if (message.role === 'tool') { + return { + role: 'tool', + tool_call_id: message.toolCallId || '', + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + } + } + + // Handle assistant messages + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map((tc) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) + + return { + role: 'assistant', + content: this.extractTextContent(message.content), + ...(toolCalls && toolCalls.length > 0 ? 
{ tool_calls: toolCalls } : {}), + } + } + + // Handle user messages - support multimodal content + const contentParts = this.normalizeContent(message.content) + + // If only text, use simple string format + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } + } + + // Otherwise, use array format for multimodal + const parts: Array = + [] + for (const part of contentParts) { + const converted = this.convertContentPart(part) + if (converted) { + parts.push(converted) + } + } + + return { + role: 'user', + content: parts.length > 0 ? parts : '', + } + } + + /** + * Converts a single ContentPart to the Chat Completions API content part format. + * Override this in subclasses to handle additional content types or provider-specific metadata. + */ + protected convertContentPart( + part: ContentPart, + ): OpenAI_SDK.Chat.Completions.ChatCompletionContentPart | null { + if (part.type === 'text') { + return { type: 'text', text: part.content } + } + + if (part.type === 'image') { + const imageMetadata = part.metadata as + | { detail?: 'auto' | 'low' | 'high' } + | undefined + + // For base64 data, construct a data URI using the mimeType from source + const imageValue = part.source.value + const imageUrl = + part.source.type === 'data' && !imageValue.startsWith('data:') + ? `data:${part.source.mimeType};base64,${imageValue}` + : imageValue + + return { + type: 'image_url', + image_url: { + url: imageUrl, + detail: imageMetadata?.detail || 'auto', + }, + } + } + + // Unsupported content type — subclasses can override to handle more types + return null + } + + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. 
+ */ + protected normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + protected extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + // It's an array of ContentPart + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts b/packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts new file mode 100644 index 000000000..ed468fac1 --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts @@ -0,0 +1,66 @@ +import { makeStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type OpenAI from 'openai' + +/** + * Chat Completions API tool format. + * This is distinct from the Responses API tool format. + */ +export type ChatCompletionFunctionTool = + OpenAI.Chat.Completions.ChatCompletionTool + +/** + * Converts a standard Tool to OpenAI Chat Completions ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply OpenAI-compatible transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + * + * This enables strict mode for all tools automatically. 
+ */ +export function convertFunctionToolToChatCompletionsFormat( + tool: Tool, + schemaConverter: ( + schema: Record, + required: Array, + ) => Record = makeStructuredOutputCompatible, +): ChatCompletionFunctionTool { + const inputSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const jsonSchema = schemaConverter(inputSchema, inputSchema.required || []) + + // Ensure additionalProperties is false for strict mode + jsonSchema.additionalProperties = false + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + }, + } satisfies ChatCompletionFunctionTool +} + +/** + * Converts an array of standard Tools to Chat Completions format. + * Chat Completions API primarily supports function tools. + */ +export function convertToolsToChatCompletionsFormat( + tools: Array, + schemaConverter?: ( + schema: Record, + required: Array, + ) => Record, +): Array { + return tools.map((tool) => + convertFunctionToolToChatCompletionsFormat(tool, schemaConverter), + ) +} diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts index d09f34476..0ff7f555e 100644 --- a/packages/typescript/openai-base/src/index.ts +++ b/packages/typescript/openai-base/src/index.ts @@ -4,3 +4,9 @@ export type { OpenAICompatibleClientConfig } from './types/config' export * from './tools/index' export * from './types/message-metadata' export * from './types/provider-options' +export { OpenAICompatibleChatCompletionsTextAdapter } from './adapters/chat-completions-text' +export { + convertFunctionToolToChatCompletionsFormat, + convertToolsToChatCompletionsFormat, + type ChatCompletionFunctionTool, +} from './adapters/chat-completions-tool-converter' diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts new file mode 100644 index 
000000000..8c95bcd2b --- /dev/null +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -0,0 +1,748 @@ +import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest' +import { OpenAICompatibleChatCompletionsTextAdapter } from '../src/adapters/chat-completions-text' +import type { StreamChunk, Tool } from '@tanstack/ai' + +// Declare mockCreate at module level +let mockCreate: ReturnType + +// Mock the OpenAI SDK +vi.mock('openai', () => { + return { + default: class { + chat = { + completions: { + create: (...args: Array) => mockCreate(...args), + }, + } + }, + } +}) + +// Helper to create async iterable from chunks +function createAsyncIterable(chunks: Array): AsyncIterable { + return { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < chunks.length) { + return { value: chunks[index++]!, done: false } + } + return { value: undefined as T, done: true } + }, + } + }, + } +} + +// Helper to setup the mock SDK client for streaming responses +function setupMockSdkClient( + streamChunks: Array>, + nonStreamResponse?: Record, +) { + mockCreate = vi.fn().mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve(nonStreamResponse) + }) +} + +const testConfig = { + apiKey: 'test-api-key', + baseURL: 'https://api.test-provider.com/v1', +} + +const weatherTool: Tool = { + name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +describe('OpenAICompatibleChatCompletionsTextAdapter', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + }) + + describe('instantiation', () => { + it('creates an adapter with default name', () => { + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('openai-compatible') + 
expect(adapter.model).toBe('test-model') + }) + + it('creates an adapter with custom name', () => { + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + 'my-provider', + ) + + expect(adapter).toBeDefined() + expect(adapter.name).toBe('my-provider') + }) + + it('creates an adapter with custom baseURL', () => { + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + { + apiKey: 'test-key', + baseURL: 'https://custom.api.example.com/v1', + }, + 'custom-model', + ) + + expect(adapter).toBeDefined() + expect(adapter.model).toBe('custom-model') + }) + }) + + describe('streaming event sequence', () => { + it('emits RUN_STARTED as the first event', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + expect(chunks[0]?.type).toBe('RUN_STARTED') + if (chunks[0]?.type === 'RUN_STARTED') { + expect(chunks[0].runId).toBeDefined() + expect(chunks[0].model).toBe('test-model') + } + }) + + it('emits TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 1, + 
total_tokens: 6, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textStartIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_START', + ) + const textContentIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(-1) + expect(textStartIndex).toBeLessThan(textContentIndex) + + const textStart = chunks[textStartIndex] + if (textStart?.type === 'TEXT_MESSAGE_START') { + expect(textStart.messageId).toBeDefined() + expect(textStart.role).toBe('assistant') + } + }) + + it('emits proper AG-UI event sequence: RUN_STARTED -> TEXT_MESSAGE_START -> TEXT_MESSAGE_CONTENT -> TEXT_MESSAGE_END -> RUN_FINISHED', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 2, + total_tokens: 7, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Verify proper AG-UI event sequence + const eventTypes = chunks.map((c) => c.type) + + // Should start with RUN_STARTED + expect(eventTypes[0]).toBe('RUN_STARTED') + + // Should have TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT + const textStartIndex = 
eventTypes.indexOf('TEXT_MESSAGE_START') + const textContentIndex = eventTypes.indexOf('TEXT_MESSAGE_CONTENT') + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(textStartIndex) + + // Should have TEXT_MESSAGE_END before RUN_FINISHED + const textEndIndex = eventTypes.indexOf('TEXT_MESSAGE_END') + const runFinishedIndex = eventTypes.indexOf('RUN_FINISHED') + expect(textEndIndex).toBeGreaterThan(-1) + expect(runFinishedIndex).toBeGreaterThan(textEndIndex) + + // Verify RUN_FINISHED has proper data + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toBeDefined() + } + }) + + it('emits TEXT_MESSAGE_END and RUN_FINISHED at the end with usage data', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textEndChunk = chunks.find((c) => c.type === 'TEXT_MESSAGE_END') + expect(textEndChunk).toBeDefined() + if (textEndChunk?.type === 'TEXT_MESSAGE_END') { + expect(textEndChunk.messageId).toBeDefined() + } + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.runId).toBeDefined() + 
expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toMatchObject({ + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }) + } + }) + + it('streams content with correct accumulated values', async () => { + const streamChunks = [ + { + id: 'chatcmpl-stream', + model: 'test-model', + choices: [ + { + delta: { content: 'Hello ' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-stream', + model: 'test-model', + choices: [ + { + delta: { content: 'world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-stream', + model: 'test-model', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 2, + total_tokens: 7, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Say hello' }], + })) { + chunks.push(chunk) + } + + // Check TEXT_MESSAGE_CONTENT events have correct accumulated content + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + + const firstContent = contentChunks[0] + if (firstContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(firstContent.delta).toBe('Hello ') + expect(firstContent.content).toBe('Hello ') + } + + const secondContent = contentChunks[1] + if (secondContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(secondContent.delta).toBe('world') + expect(secondContent.content).toBe('Hello world') + } + }) + }) + + describe('tool call events', () => { + it('emits TOOL_CALL_START -> TOOL_CALL_ARGS -> TOOL_CALL_END', async () => { + const streamChunks = [ + { + id: 'chatcmpl-456', + model: 'test-model', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + id: 'call_abc123', + type: 'function', + function: { + name: 
'lookup_weather', + arguments: '{"location":', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-456', + model: 'test-model', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + function: { + arguments: '"Berlin"}', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-456', + model: 'test-model', + choices: [ + { + delta: {}, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // Check AG-UI tool events + const toolStartChunk = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStartChunk).toBeDefined() + if (toolStartChunk?.type === 'TOOL_CALL_START') { + expect(toolStartChunk.toolCallId).toBe('call_abc123') + expect(toolStartChunk.toolName).toBe('lookup_weather') + } + + const toolArgsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgsChunks.length).toBeGreaterThan(0) + + const toolEndChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEndChunk).toBeDefined() + if (toolEndChunk?.type === 'TOOL_CALL_END') { + expect(toolEndChunk.toolCallId).toBe('call_abc123') + expect(toolEndChunk.toolName).toBe('lookup_weather') + expect(toolEndChunk.input).toEqual({ location: 'Berlin' }) + } + + // Check finish reason + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('tool_calls') + } + }) + }) + + describe('error handling', () => { + it('emits RUN_ERROR on stream error', async () => { + const streamChunks = [ + { + 
id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + ] + + // Create an async iterable that throws mid-stream + const errorIterable = { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < streamChunks.length) { + return { value: streamChunks[index++]!, done: false } + } + throw new Error('Stream interrupted') + }, + } + }, + } + + mockCreate = vi.fn().mockResolvedValue(errorIterable) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should emit RUN_ERROR + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErrorChunk).toBeDefined() + if (runErrorChunk?.type === 'RUN_ERROR') { + expect(runErrorChunk.error.message).toBe('Stream interrupted') + } + }) + + it('emits RUN_STARTED then RUN_ERROR when client.create throws', async () => { + mockCreate = vi.fn().mockRejectedValue(new Error('API key invalid')) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should have RUN_STARTED followed by RUN_ERROR + expect(chunks.length).toBe(2) + expect(chunks[0]?.type).toBe('RUN_STARTED') + expect(chunks[1]?.type).toBe('RUN_ERROR') + if (chunks[1]?.type === 'RUN_ERROR') { + expect(chunks[1].error.message).toBe('API key invalid') + } + }) + }) + + describe('structured output', () => { + it('generates structured output and parses JSON response', async () => { + const nonStreamResponse = { + choices: [ + { + message: { + content: '{"name":"Alice","age":30}', + }, + }, + ], 
+ } + + setupMockSdkClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'test-model', + messages: [ + { role: 'user', content: 'Give me a person object' }, + ], + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + }, + }) + + expect(result.data).toEqual({ name: 'Alice', age: 30 }) + expect(result.rawText).toBe('{"name":"Alice","age":30}') + + // Verify stream: false was passed + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + stream: false, + response_format: expect.objectContaining({ + type: 'json_schema', + }), + }), + ) + }) + + it('transforms null values to undefined', async () => { + const nonStreamResponse = { + choices: [ + { + message: { + content: '{"name":"Alice","nickname":null}', + }, + }, + ], + } + + setupMockSdkClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'test-model', + messages: [ + { role: 'user', content: 'Give me a person object' }, + ], + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + nickname: { type: 'string' }, + }, + required: ['name'], + }, + }) + + // null should be transformed to undefined + expect((result.data as any).name).toBe('Alice') + expect((result.data as any).nickname).toBeUndefined() + }) + + it('throws on invalid JSON response', async () => { + const nonStreamResponse = { + choices: [ + { + message: { + content: 'not valid json', + }, + }, + ], + } + + setupMockSdkClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + await expect( + adapter.structuredOutput({ + chatOptions: { 
+ model: 'test-model', + messages: [ + { role: 'user', content: 'Give me a person object' }, + ], + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + }, + required: ['name'], + }, + }), + ).rejects.toThrow('Failed to parse structured output as JSON') + }) + }) + + describe('subclassing', () => { + it('allows subclassing with custom name', () => { + class MyProviderAdapter extends OpenAICompatibleChatCompletionsTextAdapter { + constructor(apiKey: string, model: string) { + super({ apiKey, baseURL: 'https://my-provider.com/v1' }, model, 'my-provider') + } + } + + const adapter = new MyProviderAdapter('test-key', 'my-model') + expect(adapter.name).toBe('my-provider') + expect(adapter.kind).toBe('text') + expect(adapter.model).toBe('my-model') + }) + }) +}) From 273c807db9bf8a065798b57d7d985f7b55131adc Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 13:44:14 +0200 Subject: [PATCH 05/15] feat(openai-base): add Responses API text adapter base class Extract and generalize the OpenAI Responses API text adapter into OpenAICompatibleResponsesTextAdapter. This handles the full Responses API streaming pipeline (9+ event types), including reasoning/thinking tokens, tool call streaming, and structured output via text.format. Also adds responses-tool-converter for the flat Responses API tool format (distinct from Chat Completions' nested function format). 
import { BaseTextAdapter } from '@tanstack/ai/adapters'
import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils'
import { createOpenAICompatibleClient } from '../utils/client'
import { makeStructuredOutputCompatible } from '../utils/schema-converter'
import { convertToolsToResponsesFormat } from './responses-tool-converter'
import type {
  StructuredOutputOptions,
  StructuredOutputResult,
} from '@tanstack/ai/adapters'
import type OpenAI_SDK from 'openai'
import type { Responses } from 'openai/resources'
import type {
  ContentPart,
  DefaultMessageMetadataByModality,
  Modality,
  ModelMessage,
  StreamChunk,
  TextOptions,
} from '@tanstack/ai'
import type { OpenAICompatibleClientConfig } from '../types/config'

/**
 * OpenAI-compatible Responses API Text Adapter
 *
 * A generalized base class for providers that use the OpenAI Responses API
 * (`/v1/responses`). Providers like OpenAI (native), Azure OpenAI, and others
 * that implement the Responses API can extend this class and only need to:
 * - Set `baseURL` in the config
 * - Lock the generic type parameters to provider-specific types
 * - Override specific methods for quirks
 *
 * Key differences from the Chat Completions adapter:
 * - Uses `client.responses.create()` instead of `client.chat.completions.create()`
 * - Messages use `ResponseInput` format
 * - System prompts go in `instructions` field, not as array messages
 * - Streaming events are completely different (9+ event types vs simple delta chunks)
 * - Supports reasoning/thinking tokens via `response.reasoning_text.delta`
 * - Structured output uses `text.format` in the request (not `response_format`)
 * - Tool calls use `response.function_call_arguments.delta`
 * - Content parts are `input_text`, `input_image`, `input_file`
 *
 * All methods that build requests or process responses are `protected` so subclasses
 * can override them.
 */
// NOTE(review): the generic defaults below were reconstructed from mangled
// source where the `<…>` type arguments had been stripped — confirm against
// the original patch that `Record<string, unknown>` / `ReadonlyArray<Modality>`
// are the intended defaults.
export class OpenAICompatibleResponsesTextAdapter<
  TModel extends string,
  TProviderOptions extends Record<string, unknown> = Record<string, unknown>,
  TInputModalities extends ReadonlyArray<Modality> = ReadonlyArray<Modality>,
  TMessageMetadata extends
    DefaultMessageMetadataByModality = DefaultMessageMetadataByModality,
> extends BaseTextAdapter<
  TModel,
  TProviderOptions,
  TInputModalities,
  TMessageMetadata
> {
  readonly kind = 'text' as const
  // Provider display name used in log prefixes and generated IDs.
  readonly name: string

  // Underlying OpenAI SDK client, pointed at the provider's baseURL.
  protected client: OpenAI_SDK

  /**
   * @param config - Client configuration (apiKey, baseURL, ...).
   * @param model - Default model identifier for this adapter instance.
   * @param name - Provider name; defaults to 'openai-compatible-responses'.
   */
  constructor(
    config: OpenAICompatibleClientConfig,
    model: TModel,
    name: string = 'openai-compatible-responses',
  ) {
    super({}, model)
    this.name = name
    this.client = createOpenAICompatibleClient(config)
  }

  /**
   * Streams a chat completion via the Responses API, yielding AG-UI events.
   * On failure, emits RUN_STARTED (if not yet emitted) followed by RUN_ERROR
   * so consumers always observe a valid event lifecycle.
   */
  async *chatStream(
    options: TextOptions,
  ): AsyncIterable<StreamChunk> {
    // Track tool call metadata by unique ID
    // Responses API streams tool calls with deltas — first chunk has ID/name,
    // subsequent chunks only have args.
    // We assign our own indices as we encounter unique tool call IDs.
    const toolCallMetadata = new Map<
      string,
      { index: number; name: string; started: boolean }
    >()
    const requestParams = this.mapOptionsToRequest(options)
    const timestamp = Date.now()

    // AG-UI lifecycle tracking
    const aguiState = {
      runId: generateId(this.name),
      messageId: generateId(this.name),
      timestamp,
      hasEmittedRunStarted: false,
    }

    try {
      const response = await this.client.responses.create(
        {
          ...requestParams,
          stream: true,
        },
        {
          headers: options.request?.headers,
          signal: options.request?.signal,
        },
      )

      yield* this.processStreamChunks(
        response,
        toolCallMetadata,
        options,
        aguiState,
      )
    } catch (error: unknown) {
      const err = error as Error & { code?: string }

      // Emit RUN_STARTED if not yet emitted
      if (!aguiState.hasEmittedRunStarted) {
        aguiState.hasEmittedRunStarted = true
        yield {
          type: 'RUN_STARTED',
          runId: aguiState.runId,
          model: options.model,
          timestamp,
        }
      }

      // Emit AG-UI RUN_ERROR
      yield {
        type: 'RUN_ERROR',
        runId: aguiState.runId,
        model: options.model,
        timestamp,
        error: {
          message: err.message || 'Unknown error',
          code: err.code,
        },
      }

      console.error(
        `>>> [${this.name}] chatStream: Fatal error during response creation <<<`,
      )
      console.error('>>> Error message:', err.message)
      console.error('>>> Error stack:', err.stack)
      console.error('>>> Full error:', err)
    }
  }
+ * We apply provider-specific transformations for structured output compatibility. + */ + async structuredOutput( + options: StructuredOutputOptions, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapOptionsToRequest(chatOptions) + + // Apply provider-specific transformations for structured output compatibility + const jsonSchema = this.makeStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const response = await this.client.responses.create( + { + ...requestParams, + stream: false, + // Configure structured output via text.format + text: { + format: { + type: 'json_schema', + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }, + { + headers: chatOptions.request?.headers, + signal: chatOptions.request?.signal, + }, + ) + + // Extract text content from the response + const rawText = this.extractTextFromResponse( + response as OpenAI_SDK.Responses.Response, + ) + + // Parse the JSON response + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + // Transform null values to undefined to match original Zod schema expectations + // Provider returns null for optional fields we made nullable in the schema + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + console.error( + `>>> [${this.name}] structuredOutput: Error during response creation <<<`, + ) + console.error('>>> Error message:', err.message) + throw error + } + } + + /** + * Applies provider-specific transformations for structured output compatibility. + * Override this in subclasses to handle provider-specific quirks. 
+ */ + protected makeStructuredOutputCompatible( + schema: Record, + originalRequired: Array, + ): Record { + return makeStructuredOutputCompatible(schema, originalRequired) + } + + /** + * Extract text content from a non-streaming Responses API response. + * Override this in subclasses for provider-specific response shapes. + */ + protected extractTextFromResponse( + response: OpenAI_SDK.Responses.Response, + ): string { + let textContent = '' + + for (const item of response.output) { + if (item.type === 'message') { + for (const part of item.content) { + if (part.type === 'output_text') { + textContent += part.text + } + } + } + } + + return textContent + } + + /** + * Processes streamed chunks from the Responses API and yields AG-UI events. + * Override this in subclasses to handle provider-specific stream behavior. + * + * Handles the following event types: + * - response.created / response.incomplete / response.failed + * - response.output_text.delta + * - response.reasoning_text.delta + * - response.reasoning_summary_text.delta + * - response.content_part.added / response.content_part.done + * - response.output_item.added + * - response.function_call_arguments.delta / response.function_call_arguments.done + * - response.completed + * - error + */ + protected async *processStreamChunks( + stream: AsyncIterable, + toolCallMetadata: Map< + string, + { index: number; name: string; started: boolean } + >, + options: TextOptions, + aguiState: { + runId: string + messageId: string + timestamp: number + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + let accumulatedReasoning = '' + const timestamp = aguiState.timestamp + let chunkCount = 0 + + // Track if we've been streaming deltas to avoid duplicating content from done events + let hasStreamedContentDeltas = false + let hasStreamedReasoningDeltas = false + + // Preserve response metadata across events + let model: string = options.model + + // AG-UI lifecycle tracking + let 
stepId: string | null = null + let hasEmittedTextMessageStart = false + let hasEmittedStepStarted = false + + try { + for await (const chunk of stream) { + chunkCount++ + + // Emit RUN_STARTED on first chunk + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: model || options.model, + timestamp, + } + } + + const handleContentPart = ( + contentPart: { + type: string + text?: string + refusal?: string + }, + ): StreamChunk => { + if (contentPart.type === 'output_text') { + accumulatedContent += contentPart.text || '' + return { + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: model || options.model, + timestamp, + delta: contentPart.text || '', + content: accumulatedContent, + } + } + + if (contentPart.type === 'reasoning_text') { + accumulatedReasoning += contentPart.text || '' + return { + type: 'STEP_FINISHED', + stepId: stepId || generateId(this.name), + model: model || options.model, + timestamp, + delta: contentPart.text || '', + content: accumulatedReasoning, + } + } + return { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: model || options.model, + timestamp, + error: { + message: contentPart.refusal || 'Unknown refusal', + }, + } + } + + // handle general response events + if ( + chunk.type === 'response.created' || + chunk.type === 'response.incomplete' || + chunk.type === 'response.failed' + ) { + model = chunk.response.model + // Reset streaming flags for new response + hasStreamedContentDeltas = false + hasStreamedReasoningDeltas = false + hasEmittedTextMessageStart = false + hasEmittedStepStarted = false + accumulatedContent = '' + accumulatedReasoning = '' + if (chunk.response.error) { + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: chunk.response.model, + timestamp, + error: chunk.response.error, + } + } + if (chunk.response.incomplete_details) { + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, 
+ model: chunk.response.model, + timestamp, + error: { + message: chunk.response.incomplete_details.reason ?? '', + }, + } + } + } + + // Handle output text deltas (token-by-token streaming) + // response.output_text.delta provides incremental text updates + if (chunk.type === 'response.output_text.delta' && chunk.delta) { + // Delta can be an array of strings or a single string + const textDelta = Array.isArray(chunk.delta) + ? chunk.delta.join('') + : typeof chunk.delta === 'string' + ? chunk.delta + : '' + + if (textDelta) { + // Emit TEXT_MESSAGE_START on first text content + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: model || options.model, + timestamp, + role: 'assistant', + } + } + + accumulatedContent += textDelta + hasStreamedContentDeltas = true + yield { + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: model || options.model, + timestamp, + delta: textDelta, + content: accumulatedContent, + } + } + } + + // Handle reasoning deltas (token-by-token thinking/reasoning streaming) + // response.reasoning_text.delta provides incremental reasoning updates + if (chunk.type === 'response.reasoning_text.delta' && chunk.delta) { + // Delta can be an array of strings or a single string + const reasoningDelta = Array.isArray(chunk.delta) + ? chunk.delta.join('') + : typeof chunk.delta === 'string' + ? 
chunk.delta + : '' + + if (reasoningDelta) { + // Emit STEP_STARTED on first reasoning content + if (!hasEmittedStepStarted) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: 'STEP_STARTED', + stepId, + model: model || options.model, + timestamp, + stepType: 'thinking', + } + } + + accumulatedReasoning += reasoningDelta + hasStreamedReasoningDeltas = true + yield { + type: 'STEP_FINISHED', + stepId: stepId || generateId(this.name), + model: model || options.model, + timestamp, + delta: reasoningDelta, + content: accumulatedReasoning, + } + } + } + + // Handle reasoning summary deltas (when using reasoning.summary option) + // response.reasoning_summary_text.delta provides incremental summary updates + if ( + chunk.type === 'response.reasoning_summary_text.delta' && + chunk.delta + ) { + const summaryDelta = + typeof chunk.delta === 'string' ? chunk.delta : '' + + if (summaryDelta) { + // Emit STEP_STARTED on first reasoning content + if (!hasEmittedStepStarted) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: 'STEP_STARTED', + stepId, + model: model || options.model, + timestamp, + stepType: 'thinking', + } + } + + accumulatedReasoning += summaryDelta + hasStreamedReasoningDeltas = true + yield { + type: 'STEP_FINISHED', + stepId: stepId || generateId(this.name), + model: model || options.model, + timestamp, + delta: summaryDelta, + content: accumulatedReasoning, + } + } + } + + // handle content_part added events for text, reasoning and refusals + if (chunk.type === 'response.content_part.added') { + const contentPart = chunk.part + // Emit TEXT_MESSAGE_START if this is text content + if ( + contentPart.type === 'output_text' && + !hasEmittedTextMessageStart + ) { + hasEmittedTextMessageStart = true + yield { + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: model || options.model, + timestamp, + role: 'assistant', + } + } + // Emit STEP_STARTED if this is reasoning content 
+ if (contentPart.type === 'reasoning_text' && !hasEmittedStepStarted) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: 'STEP_STARTED', + stepId, + model: model || options.model, + timestamp, + stepType: 'thinking', + } + } + yield handleContentPart(contentPart) + } + + if (chunk.type === 'response.content_part.done') { + const contentPart = chunk.part + + // Skip emitting chunks for content parts that we've already streamed via deltas + // The done event is just a completion marker, not new content + if (contentPart.type === 'output_text' && hasStreamedContentDeltas) { + // Content already accumulated from deltas, skip + continue + } + if ( + contentPart.type === 'reasoning_text' && + hasStreamedReasoningDeltas + ) { + // Reasoning already accumulated from deltas, skip + continue + } + + // Only emit if we haven't been streaming deltas (e.g., for non-streaming responses) + yield handleContentPart(contentPart) + } + + // handle output_item.added to capture function call metadata (name) + if (chunk.type === 'response.output_item.added') { + const item = chunk.item + if (item.type === 'function_call' && item.id) { + // Store the function name for later use + if (!toolCallMetadata.has(item.id)) { + toolCallMetadata.set(item.id, { + index: chunk.output_index, + name: item.name || '', + started: false, + }) + } + // Emit TOOL_CALL_START + yield { + type: 'TOOL_CALL_START', + toolCallId: item.id, + toolName: item.name || '', + model: model || options.model, + timestamp, + index: chunk.output_index, + } + toolCallMetadata.get(item.id)!.started = true + } + } + + // Handle function call arguments delta (streaming) + if ( + chunk.type === 'response.function_call_arguments.delta' && + chunk.delta + ) { + const metadata = toolCallMetadata.get(chunk.item_id) + yield { + type: 'TOOL_CALL_ARGS', + toolCallId: chunk.item_id, + model: model || options.model, + timestamp, + delta: chunk.delta, + args: metadata ? 
undefined : chunk.delta, + } + } + + if (chunk.type === 'response.function_call_arguments.done') { + const { item_id } = chunk + + // Get the function name from metadata (captured in output_item.added) + const metadata = toolCallMetadata.get(item_id) + const name = metadata?.name || '' + + // Parse arguments + let parsedInput: unknown = {} + try { + parsedInput = chunk.arguments ? JSON.parse(chunk.arguments) : {} + } catch { + parsedInput = {} + } + + yield { + type: 'TOOL_CALL_END', + toolCallId: item_id, + toolName: name, + model: model || options.model, + timestamp, + input: parsedInput, + } + } + + if (chunk.type === 'response.completed') { + // Emit TEXT_MESSAGE_END if we had text content + if (hasEmittedTextMessageStart) { + yield { + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: model || options.model, + timestamp, + } + } + + // Determine finish reason based on output + // If there are function_call items in the output, it's a tool_calls finish + const hasFunctionCalls = chunk.response.output.some( + (item: unknown) => + (item as { type: string }).type === 'function_call', + ) + + yield { + type: 'RUN_FINISHED', + runId: aguiState.runId, + model: model || options.model, + timestamp, + usage: { + promptTokens: chunk.response.usage?.input_tokens || 0, + completionTokens: chunk.response.usage?.output_tokens || 0, + totalTokens: chunk.response.usage?.total_tokens || 0, + }, + finishReason: hasFunctionCalls ? 'tool_calls' : 'stop', + } + } + + if (chunk.type === 'error') { + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: model || options.model, + timestamp, + error: { + message: chunk.message, + code: chunk.code ?? 
undefined, + }, + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log( + `[${this.name}] Stream ended with error:`, + err.message, + ) + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Maps common TextOptions to Responses API request format. + * Override this in subclasses to add provider-specific options. + */ + protected mapOptionsToRequest( + options: TextOptions, + ): Omit { + const input = this.convertMessagesToInput(options.messages) + + const tools = options.tools + ? convertToolsToResponsesFormat( + options.tools, + this.makeStructuredOutputCompatible.bind(this), + ) + : undefined + + const modelOptions = options.modelOptions as Record | undefined + + return { + model: options.model, + temperature: options.temperature, + max_output_tokens: options.maxTokens, + top_p: options.topP, + metadata: options.metadata, + instructions: options.systemPrompts?.join('\n'), + ...modelOptions, + input, + tools, + } + } + + /** + * Converts ModelMessage[] to Responses API ResponseInput format. + * Override this in subclasses for provider-specific message format quirks. 
+ * + * Key differences from Chat Completions: + * - Tool results use `function_call_output` type (not `tool` role) + * - Assistant tool calls are `function_call` objects (not nested in `tool_calls`) + * - User content uses `input_text`, `input_image`, `input_file` types + * - System prompts go in `instructions`, not as messages + */ + protected convertMessagesToInput( + messages: Array, + ): Responses.ResponseInput { + const result: Responses.ResponseInput = [] + + for (const message of messages) { + // Handle tool messages - convert to FunctionToolCallOutput + if (message.role === 'tool') { + result.push({ + type: 'function_call_output', + call_id: message.toolCallId || '', + output: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + }) + continue + } + + // Handle assistant messages + if (message.role === 'assistant') { + // If the assistant message has tool calls, add them as FunctionToolCall objects + // Responses API expects arguments as a string (JSON string) + if (message.toolCalls && message.toolCalls.length > 0) { + for (const toolCall of message.toolCalls) { + // Keep arguments as string for Responses API + const argumentsString = + typeof toolCall.function.arguments === 'string' + ? 
toolCall.function.arguments + : JSON.stringify(toolCall.function.arguments) + + result.push({ + type: 'function_call', + call_id: toolCall.id, + name: toolCall.function.name, + arguments: argumentsString, + }) + } + } + + // Add the assistant's text message if there is content + if (message.content) { + const contentStr = this.extractTextContent(message.content) + if (contentStr) { + result.push({ + type: 'message', + role: 'assistant', + content: contentStr, + }) + } + } + + continue + } + + // Handle user messages (default case) — support multimodal content + const contentParts = this.normalizeContent(message.content) + const inputContent: Array = [] + + for (const part of contentParts) { + inputContent.push(this.convertContentPartToInput(part)) + } + + // If no content parts, add empty text + if (inputContent.length === 0) { + inputContent.push({ type: 'input_text', text: '' }) + } + + result.push({ + type: 'message', + role: 'user', + content: inputContent, + }) + } + + return result + } + + /** + * Converts a ContentPart to Responses API input content item. + * Handles text, image, and audio content parts. + * Override this in subclasses for additional content types or provider-specific metadata. + */ + protected convertContentPartToInput( + part: ContentPart, + ): Responses.ResponseInputContent { + switch (part.type) { + case 'text': + return { + type: 'input_text', + text: part.content, + } + case 'image': { + const imageMetadata = part.metadata as + | { detail?: 'auto' | 'low' | 'high' } + | undefined + if (part.source.type === 'url') { + return { + type: 'input_image', + image_url: part.source.value, + detail: imageMetadata?.detail || 'auto', + } + } + // For base64 data, construct a data URI using the mimeType from source + const imageValue = part.source.value + const imageUrl = imageValue.startsWith('data:') + ? 
imageValue + : `data:${part.source.mimeType};base64,${imageValue}` + return { + type: 'input_image', + image_url: imageUrl, + detail: imageMetadata?.detail || 'auto', + } + } + case 'audio': { + if (part.source.type === 'url') { + return { + type: 'input_file', + file_url: part.source.value, + } + } + return { + type: 'input_file', + file_data: part.source.value, + } + } + + default: + throw new Error(`Unsupported content part type: ${part.type}`) + } + } + + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. + */ + protected normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + protected extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + // It's an array of ContentPart + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} diff --git a/packages/typescript/openai-base/src/adapters/responses-tool-converter.ts b/packages/typescript/openai-base/src/adapters/responses-tool-converter.ts new file mode 100644 index 000000000..6d44a8a39 --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/responses-tool-converter.ts @@ -0,0 +1,74 @@ +import { makeStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' + +/** + * Responses API function tool format. + * This is distinct from the Chat Completions API tool format. + * + * The Responses API uses a flatter structure: + * { type: 'function', name: string, description?: string, parameters: object, strict?: boolean } + * + * vs. 
Chat Completions: + * { type: 'function', function: { name, description, parameters }, strict?: boolean } + */ +export interface ResponsesFunctionTool { + type: 'function' + name: string + description?: string | null + parameters: Record | null + strict: boolean | null +} + +/** + * Converts a standard Tool to the Responses API FunctionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply OpenAI-compatible transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + * + * This enables strict mode for all tools automatically. + */ +export function convertFunctionToolToResponsesFormat( + tool: Tool, + schemaConverter: ( + schema: Record, + required: Array, + ) => Record = makeStructuredOutputCompatible, +): ResponsesFunctionTool { + const inputSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const jsonSchema = schemaConverter(inputSchema, inputSchema.required || []) + + // Ensure additionalProperties is false for strict mode + jsonSchema.additionalProperties = false + + return { + type: 'function', + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + } +} + +/** + * Converts an array of standard Tools to Responses API format. + * The Responses API primarily supports function tools at the base level. 
+ */ +export function convertToolsToResponsesFormat( + tools: Array, + schemaConverter?: ( + schema: Record, + required: Array, + ) => Record, +): Array { + return tools.map((tool) => + convertFunctionToolToResponsesFormat(tool, schemaConverter), + ) +} diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts index 0ff7f555e..b63b62802 100644 --- a/packages/typescript/openai-base/src/index.ts +++ b/packages/typescript/openai-base/src/index.ts @@ -10,3 +10,9 @@ export { convertToolsToChatCompletionsFormat, type ChatCompletionFunctionTool, } from './adapters/chat-completions-tool-converter' +export { OpenAICompatibleResponsesTextAdapter } from './adapters/responses-text' +export { + convertFunctionToolToResponsesFormat, + convertToolsToResponsesFormat, + type ResponsesFunctionTool, +} from './adapters/responses-tool-converter' diff --git a/packages/typescript/openai-base/tests/responses-text.test.ts b/packages/typescript/openai-base/tests/responses-text.test.ts new file mode 100644 index 000000000..4e690ad37 --- /dev/null +++ b/packages/typescript/openai-base/tests/responses-text.test.ts @@ -0,0 +1,1474 @@ +import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest' +import { OpenAICompatibleResponsesTextAdapter } from '../src/adapters/responses-text' +import type { StreamChunk, Tool } from '@tanstack/ai' + +// Declare mockCreate at module level +let mockResponsesCreate: ReturnType + +// Mock the OpenAI SDK +vi.mock('openai', () => { + return { + default: class { + responses = { + create: (...args: Array) => mockResponsesCreate(...args), + } + }, + } +}) + +// Helper to create async iterable from chunks +function createAsyncIterable(chunks: Array): AsyncIterable { + return { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < chunks.length) { + return { value: chunks[index++]!, done: false } + } + return { value: undefined as T, done: true } + }, + } + }, + } +} + +// 
Helper to setup the mock SDK client for streaming/non-streaming responses +function setupMockResponsesClient( + streamChunks: Array>, + nonStreamResponse?: Record, +) { + mockResponsesCreate = vi.fn().mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve(nonStreamResponse) + }) +} + +const testConfig = { + apiKey: 'test-api-key', + baseURL: 'https://api.test-provider.com/v1', +} + +const weatherTool: Tool = { + name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +describe('OpenAICompatibleResponsesTextAdapter', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + }) + + describe('instantiation', () => { + it('creates an adapter with default name', () => { + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('openai-compatible-responses') + expect(adapter.model).toBe('test-model') + }) + + it('creates an adapter with custom name', () => { + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + 'my-provider', + ) + + expect(adapter).toBeDefined() + expect(adapter.name).toBe('my-provider') + }) + + it('creates an adapter with custom baseURL', () => { + const adapter = new OpenAICompatibleResponsesTextAdapter( + { + apiKey: 'test-key', + baseURL: 'https://custom.api.example.com/v1', + }, + 'custom-model', + ) + + expect(adapter).toBeDefined() + expect(adapter.model).toBe('custom-model') + }) + }) + + describe('streaming event sequence', () => { + it('emits RUN_STARTED as the first event', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello', + }, + { + type: 
'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + expect(chunks[0]?.type).toBe('RUN_STARTED') + if (chunks[0]?.type === 'RUN_STARTED') { + expect(chunks[0].runId).toBeDefined() + expect(chunks[0].model).toBe('test-model') + } + }) + + it('emits TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT on output_text.delta', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textStartIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_START', + ) + const textContentIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(-1) + expect(textStartIndex).toBeLessThan(textContentIndex) + + const textStart = chunks[textStartIndex] + if (textStart?.type === 'TEXT_MESSAGE_START') { + 
expect(textStart.messageId).toBeDefined() + expect(textStart.role).toBe('assistant') + } + }) + + it('emits proper AG-UI event sequence: RUN_STARTED -> TEXT_MESSAGE_START -> TEXT_MESSAGE_CONTENT -> TEXT_MESSAGE_END -> RUN_FINISHED', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 2, + total_tokens: 7, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Verify proper AG-UI event sequence + const eventTypes = chunks.map((c) => c.type) + + // Should start with RUN_STARTED + expect(eventTypes[0]).toBe('RUN_STARTED') + + // Should have TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT + const textStartIndex = eventTypes.indexOf('TEXT_MESSAGE_START') + const textContentIndex = eventTypes.indexOf('TEXT_MESSAGE_CONTENT') + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(textStartIndex) + + // Should have TEXT_MESSAGE_END before RUN_FINISHED + const textEndIndex = eventTypes.indexOf('TEXT_MESSAGE_END') + const runFinishedIndex = eventTypes.indexOf('RUN_FINISHED') + expect(textEndIndex).toBeGreaterThan(-1) + expect(runFinishedIndex).toBeGreaterThan(textEndIndex) + + // Verify RUN_FINISHED has proper data + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('stop') + 
expect(runFinishedChunk.usage).toBeDefined() + } + }) + + it('emits TEXT_MESSAGE_END and RUN_FINISHED at the end with usage data', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textEndChunk = chunks.find((c) => c.type === 'TEXT_MESSAGE_END') + expect(textEndChunk).toBeDefined() + if (textEndChunk?.type === 'TEXT_MESSAGE_END') { + expect(textEndChunk.messageId).toBeDefined() + } + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.runId).toBeDefined() + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toMatchObject({ + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }) + } + }) + + it('streams content with correct accumulated values', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello ', + }, + { + type: 'response.output_text.delta', + delta: 'world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 
5, + output_tokens: 2, + total_tokens: 7, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Say hello' }], + })) { + chunks.push(chunk) + } + + // Check TEXT_MESSAGE_CONTENT events have correct accumulated content + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + + const firstContent = contentChunks[0] + if (firstContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(firstContent.delta).toBe('Hello ') + expect(firstContent.content).toBe('Hello ') + } + + const secondContent = contentChunks[1] + if (secondContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(secondContent.delta).toBe('world') + expect(secondContent.content).toBe('Hello world') + } + }) + }) + + describe('reasoning/thinking tokens', () => { + it('emits STEP_STARTED and STEP_FINISHED for reasoning_text.delta', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.reasoning_text.delta', + delta: 'Let me think about this...', + }, + { + type: 'response.reasoning_text.delta', + delta: ' The answer is clear.', + }, + { + type: 'response.output_text.delta', + delta: 'The answer is 42.', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 10, + output_tokens: 20, + total_tokens: 30, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 
'user', content: 'What is the meaning of life?' }], + })) { + chunks.push(chunk) + } + + const eventTypes = chunks.map((c) => c.type) + + // Should have STEP_STARTED for reasoning + const stepStartIndex = eventTypes.indexOf('STEP_STARTED') + expect(stepStartIndex).toBeGreaterThan(-1) + + const stepStart = chunks[stepStartIndex] + if (stepStart?.type === 'STEP_STARTED') { + expect(stepStart.stepId).toBeDefined() + expect(stepStart.stepType).toBe('thinking') + } + + // Should have STEP_FINISHED events for reasoning deltas + const stepFinished = chunks.filter((c) => c.type === 'STEP_FINISHED') + expect(stepFinished.length).toBe(2) + + // Check accumulated reasoning + if (stepFinished[0]?.type === 'STEP_FINISHED') { + expect(stepFinished[0].delta).toBe('Let me think about this...') + expect(stepFinished[0].content).toBe('Let me think about this...') + } + if (stepFinished[1]?.type === 'STEP_FINISHED') { + expect(stepFinished[1].delta).toBe(' The answer is clear.') + expect(stepFinished[1].content).toBe( + 'Let me think about this... 
The answer is clear.', + ) + } + + // Should also have text content + const textContent = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(textContent.length).toBe(1) + }) + + it('emits STEP_STARTED and STEP_FINISHED for reasoning_summary_text.delta', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.reasoning_summary_text.delta', + delta: 'Summary of reasoning...', + }, + { + type: 'response.output_text.delta', + delta: 'Final answer.', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 2, + total_tokens: 7, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Explain' }], + })) { + chunks.push(chunk) + } + + const stepStart = chunks.find((c) => c.type === 'STEP_STARTED') + expect(stepStart).toBeDefined() + if (stepStart?.type === 'STEP_STARTED') { + expect(stepStart.stepType).toBe('thinking') + } + + const stepFinished = chunks.filter((c) => c.type === 'STEP_FINISHED') + expect(stepFinished.length).toBe(1) + if (stepFinished[0]?.type === 'STEP_FINISHED') { + expect(stepFinished[0].delta).toBe('Summary of reasoning...') + } + }) + }) + + describe('tool call events', () => { + it('emits TOOL_CALL_START -> TOOL_CALL_ARGS -> TOOL_CALL_END', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-456', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_item.added', + output_index: 0, + item: { + type: 'function_call', + id: 'call_abc123', + name: 'lookup_weather', + }, + }, + 
{ + type: 'response.function_call_arguments.delta', + item_id: 'call_abc123', + delta: '{"location":', + }, + { + type: 'response.function_call_arguments.delta', + item_id: 'call_abc123', + delta: '"Berlin"}', + }, + { + type: 'response.function_call_arguments.done', + item_id: 'call_abc123', + arguments: '{"location":"Berlin"}', + }, + { + type: 'response.completed', + response: { + id: 'resp-456', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'call_abc123', + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + ], + usage: { + input_tokens: 10, + output_tokens: 5, + total_tokens: 15, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // Check AG-UI tool events + const toolStartChunk = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStartChunk).toBeDefined() + if (toolStartChunk?.type === 'TOOL_CALL_START') { + expect(toolStartChunk.toolCallId).toBe('call_abc123') + expect(toolStartChunk.toolName).toBe('lookup_weather') + expect(toolStartChunk.index).toBe(0) + } + + const toolArgsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgsChunks.length).toBe(2) + if (toolArgsChunks[0]?.type === 'TOOL_CALL_ARGS') { + expect(toolArgsChunks[0].delta).toBe('{"location":') + } + if (toolArgsChunks[1]?.type === 'TOOL_CALL_ARGS') { + expect(toolArgsChunks[1].delta).toBe('"Berlin"}') + } + + const toolEndChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEndChunk).toBeDefined() + if (toolEndChunk?.type === 'TOOL_CALL_END') { + expect(toolEndChunk.toolCallId).toBe('call_abc123') + expect(toolEndChunk.toolName).toBe('lookup_weather') + 
expect(toolEndChunk.input).toEqual({ location: 'Berlin' }) + } + + // Check finish reason is tool_calls when output contains function_call items + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('tool_calls') + } + }) + + it('handles multiple parallel tool calls', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-789', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_item.added', + output_index: 0, + item: { + type: 'function_call', + id: 'call_1', + name: 'lookup_weather', + }, + }, + { + type: 'response.output_item.added', + output_index: 1, + item: { + type: 'function_call', + id: 'call_2', + name: 'lookup_weather', + }, + }, + { + type: 'response.function_call_arguments.delta', + item_id: 'call_1', + delta: '{"location":"Berlin"}', + }, + { + type: 'response.function_call_arguments.delta', + item_id: 'call_2', + delta: '{"location":"Paris"}', + }, + { + type: 'response.function_call_arguments.done', + item_id: 'call_1', + arguments: '{"location":"Berlin"}', + }, + { + type: 'response.function_call_arguments.done', + item_id: 'call_2', + arguments: '{"location":"Paris"}', + }, + { + type: 'response.completed', + response: { + id: 'resp-789', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'call_1', + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + { + type: 'function_call', + id: 'call_2', + name: 'lookup_weather', + arguments: '{"location":"Paris"}', + }, + ], + usage: { + input_tokens: 10, + output_tokens: 10, + total_tokens: 20, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [ 
+ { + role: 'user', + content: 'Weather in Berlin and Paris?', + }, + ], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + const toolStarts = chunks.filter((c) => c.type === 'TOOL_CALL_START') + expect(toolStarts.length).toBe(2) + + const toolEnds = chunks.filter((c) => c.type === 'TOOL_CALL_END') + expect(toolEnds.length).toBe(2) + + if (toolEnds[0]?.type === 'TOOL_CALL_END') { + expect(toolEnds[0].input).toEqual({ location: 'Berlin' }) + } + if (toolEnds[1]?.type === 'TOOL_CALL_END') { + expect(toolEnds[1].input).toEqual({ location: 'Paris' }) + } + }) + }) + + describe('content_part events', () => { + it('emits TEXT_MESSAGE_START on content_part.added with output_text', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.content_part.added', + part: { + type: 'output_text', + text: 'It is sunny', + }, + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 3, + total_tokens: 8, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Weather?' 
}], + })) { + chunks.push(chunk) + } + + const eventTypes = chunks.map((c) => c.type) + expect(eventTypes).toContain('TEXT_MESSAGE_START') + expect(eventTypes).toContain('TEXT_MESSAGE_CONTENT') + + // TEXT_MESSAGE_START should be before TEXT_MESSAGE_CONTENT + const startIdx = eventTypes.indexOf('TEXT_MESSAGE_START') + const contentIdx = eventTypes.indexOf('TEXT_MESSAGE_CONTENT') + expect(startIdx).toBeLessThan(contentIdx) + }) + + it('skips content_part.done when deltas were already streamed', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello', + }, + { + type: 'response.output_text.delta', + delta: ' world', + }, + { + type: 'response.content_part.done', + part: { + type: 'output_text', + text: 'Hello world', + }, + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 2, + total_tokens: 7, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should only have 2 TEXT_MESSAGE_CONTENT events (from deltas), not 3 + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + }) + }) + + describe('error handling', () => { + it('emits RUN_ERROR on stream error', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_text.delta', + delta: 'Hello', + }, + ] + + // Create an async iterable that throws 
mid-stream + const errorIterable = { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < streamChunks.length) { + return { value: streamChunks[index++]!, done: false } + } + throw new Error('Stream interrupted') + }, + } + }, + } + + mockResponsesCreate = vi.fn().mockResolvedValue(errorIterable) + + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should emit RUN_ERROR + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErrorChunk).toBeDefined() + if (runErrorChunk?.type === 'RUN_ERROR') { + expect(runErrorChunk.error.message).toBe('Stream interrupted') + } + }) + + it('emits RUN_STARTED then RUN_ERROR when client.create throws', async () => { + mockResponsesCreate = vi + .fn() + .mockRejectedValue(new Error('API key invalid')) + + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should have RUN_STARTED followed by RUN_ERROR + expect(chunks.length).toBe(2) + expect(chunks[0]?.type).toBe('RUN_STARTED') + expect(chunks[1]?.type).toBe('RUN_ERROR') + if (chunks[1]?.type === 'RUN_ERROR') { + expect(chunks[1].error.message).toBe('API key invalid') + } + }) + + it('emits RUN_ERROR on response.failed event', async () => { + const streamChunks = [ + { + type: 'response.failed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'failed', + error: { + message: 'Content policy violation', + code: 'content_filter', + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + 
testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'bad content' }], + })) { + chunks.push(chunk) + } + + const errorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(errorChunk).toBeDefined() + if (errorChunk?.type === 'RUN_ERROR') { + expect(errorChunk.error.message).toBe('Content policy violation') + } + }) + + it('emits RUN_ERROR on response.incomplete event', async () => { + const streamChunks = [ + { + type: 'response.incomplete', + response: { + id: 'resp-123', + model: 'test-model', + status: 'incomplete', + incomplete_details: { + reason: 'max_output_tokens', + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Write a long story' }], + })) { + chunks.push(chunk) + } + + const errorChunks = chunks.filter((c) => c.type === 'RUN_ERROR') + expect(errorChunks.length).toBeGreaterThan(0) + const incompleteError = errorChunks.find( + (c) => + c.type === 'RUN_ERROR' && + c.error.message === 'max_output_tokens', + ) + expect(incompleteError).toBeDefined() + }) + + it('emits RUN_ERROR on error event type', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'error', + message: 'Rate limit exceeded', + code: 'rate_limit', + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const errorChunk = chunks.find( + 
(c) => c.type === 'RUN_ERROR' && c.error.message === 'Rate limit exceeded', + ) + expect(errorChunk).toBeDefined() + if (errorChunk?.type === 'RUN_ERROR') { + expect(errorChunk.error.code).toBe('rate_limit') + } + }) + }) + + describe('structured output', () => { + it('generates structured output and parses JSON response', async () => { + const nonStreamResponse = { + output: [ + { + type: 'message', + content: [ + { + type: 'output_text', + text: '{"name":"Alice","age":30}', + }, + ], + }, + ], + } + + setupMockResponsesClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'test-model', + messages: [{ role: 'user', content: 'Give me a person object' }], + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + }, + }) + + expect(result.data).toEqual({ name: 'Alice', age: 30 }) + expect(result.rawText).toBe('{"name":"Alice","age":30}') + + // Verify text.format was passed (Responses API format) + expect(mockResponsesCreate).toHaveBeenCalledWith( + expect.objectContaining({ + stream: false, + text: expect.objectContaining({ + format: expect.objectContaining({ + type: 'json_schema', + name: 'structured_output', + strict: true, + }), + }), + }), + expect.anything(), + ) + }) + + it('transforms null values to undefined', async () => { + const nonStreamResponse = { + output: [ + { + type: 'message', + content: [ + { + type: 'output_text', + text: '{"name":"Alice","nickname":null}', + }, + ], + }, + ], + } + + setupMockResponsesClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'test-model', + messages: [{ role: 'user', content: 'Give me a person object' }], + }, + outputSchema: { + 
type: 'object', + properties: { + name: { type: 'string' }, + nickname: { type: 'string' }, + }, + required: ['name'], + }, + }) + + // null should be transformed to undefined + expect((result.data as any).name).toBe('Alice') + expect((result.data as any).nickname).toBeUndefined() + }) + + it('throws on invalid JSON response', async () => { + const nonStreamResponse = { + output: [ + { + type: 'message', + content: [ + { + type: 'output_text', + text: 'not valid json', + }, + ], + }, + ], + } + + setupMockResponsesClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + await expect( + adapter.structuredOutput({ + chatOptions: { + model: 'test-model', + messages: [ + { role: 'user', content: 'Give me a person object' }, + ], + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + }, + required: ['name'], + }, + }), + ).rejects.toThrow('Failed to parse structured output as JSON') + }) + }) + + describe('request mapping', () => { + it('maps options to Responses API payload format', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + temperature: 0.5, + topP: 0.9, + maxTokens: 1024, + systemPrompts: ['Be helpful'], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + expect(mockResponsesCreate).toHaveBeenCalledTimes(1) + const [payload] = 
mockResponsesCreate.mock.calls[0] + + // Verify Responses API field names + expect(payload).toMatchObject({ + model: 'test-model', + temperature: 0.5, + top_p: 0.9, + max_output_tokens: 1024, + stream: true, + instructions: 'Be helpful', + }) + + // Responses API uses 'input' instead of 'messages' + expect(payload.input).toBeDefined() + expect(Array.isArray(payload.input)).toBe(true) + + // Verify tools are included + expect(payload.tools).toBeDefined() + expect(Array.isArray(payload.tools)).toBe(true) + expect(payload.tools.length).toBe(1) + expect(payload.tools[0].type).toBe('function') + expect(payload.tools[0].name).toBe('lookup_weather') + }) + + it('converts user messages to input_text format', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + input_tokens: 5, + output_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello world' }], + })) { + chunks.push(chunk) + } + + const [payload] = mockResponsesCreate.mock.calls[0] + expect(payload.input).toEqual([ + { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Hello world' }], + }, + ]) + }) + + it('converts assistant messages with tool calls to function_call format', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'test-model', + status: 'completed', + output: [], + usage: { + 
input_tokens: 10, + output_tokens: 1, + total_tokens: 11, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [ + { + role: 'assistant', + content: 'Let me check', + toolCalls: [ + { + id: 'call_123', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + }, + ], + }, + { + role: 'tool', + toolCallId: 'call_123', + content: '{"temp":72}', + }, + ], + })) { + chunks.push(chunk) + } + + const [payload] = mockResponsesCreate.mock.calls[0] + // Should have function_call, message, and function_call_output + expect(payload.input).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + type: 'function_call', + call_id: 'call_123', + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }), + expect.objectContaining({ + type: 'message', + role: 'assistant', + content: 'Let me check', + }), + expect.objectContaining({ + type: 'function_call_output', + call_id: 'call_123', + output: '{"temp":72}', + }), + ]), + ) + }) + }) + + describe('subclassing', () => { + it('allows subclassing with custom name', () => { + class MyProviderAdapter extends OpenAICompatibleResponsesTextAdapter { + constructor(apiKey: string, model: string) { + super( + { apiKey, baseURL: 'https://my-provider.com/v1' }, + model, + 'my-provider', + ) + } + } + + const adapter = new MyProviderAdapter('test-key', 'my-model') + expect(adapter.name).toBe('my-provider') + expect(adapter.kind).toBe('text') + expect(adapter.model).toBe('my-model') + }) + }) +}) From 865a8a4ec99c9c86e887e9b32db610beca63dbd4 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 13:54:39 +0200 Subject: [PATCH 06/15] feat(openai-base): add image, summarize, transcription, TTS, and video adapter base classes --- 
.../openai-base/src/adapters/image.ts | 132 +++++++++ .../openai-base/src/adapters/summarize.ts | 120 ++++++++ .../openai-base/src/adapters/transcription.ts | 161 +++++++++++ .../openai-base/src/adapters/tts.ts | 109 +++++++ .../openai-base/src/adapters/video.ts | 268 ++++++++++++++++++ packages/typescript/openai-base/src/index.ts | 8 + 6 files changed, 798 insertions(+) create mode 100644 packages/typescript/openai-base/src/adapters/image.ts create mode 100644 packages/typescript/openai-base/src/adapters/summarize.ts create mode 100644 packages/typescript/openai-base/src/adapters/transcription.ts create mode 100644 packages/typescript/openai-base/src/adapters/tts.ts create mode 100644 packages/typescript/openai-base/src/adapters/video.ts diff --git a/packages/typescript/openai-base/src/adapters/image.ts b/packages/typescript/openai-base/src/adapters/image.ts new file mode 100644 index 000000000..bf431fbc3 --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/image.ts @@ -0,0 +1,132 @@ +import { BaseImageAdapter } from '@tanstack/ai/adapters' +import { generateId } from '@tanstack/ai-utils' +import { createOpenAICompatibleClient } from '../utils/client' +import type { + GeneratedImage, + ImageGenerationOptions, + ImageGenerationResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' +import type { OpenAICompatibleClientConfig } from '../types/config' + +/** + * OpenAI-Compatible Image Generation Adapter + * + * A generalized base class for providers that implement OpenAI-compatible image + * generation APIs. Providers like OpenAI, Grok, and others can extend this class + * and only need to: + * - Set `baseURL` in the config + * - Lock the generic type parameters to provider-specific types + * - Override validation or request building methods for provider-specific constraints + * + * All methods that validate inputs, build requests, or transform responses are + * `protected` so subclasses can override them. 
+ */ +export class OpenAICompatibleImageAdapter< + TModel extends string, + TProviderOptions extends object = Record, + TModelProviderOptionsByName extends Record = Record, + TModelSizeByName extends Record = Record, +> extends BaseImageAdapter< + TModel, + TProviderOptions, + TModelProviderOptionsByName, + TModelSizeByName +> { + readonly kind = 'image' as const + readonly name: string + + protected client: OpenAI_SDK + + constructor( + config: OpenAICompatibleClientConfig, + model: TModel, + name: string = 'openai-compatible', + ) { + super({}, model) + this.name = name + this.client = createOpenAICompatibleClient(config) + } + + async generateImages( + options: ImageGenerationOptions, + ): Promise { + const { model, prompt, numberOfImages, size } = options + + // Validate inputs + this.validatePrompt({ prompt, model }) + this.validateImageSize(model, size) + this.validateNumberOfImages(model, numberOfImages) + + // Build request based on model type + const request = this.buildRequest(options) + + const response = await this.client.images.generate({ + ...request, + stream: false, + }) + + return this.transformResponse(model, response) + } + + protected buildRequest( + options: ImageGenerationOptions, + ): OpenAI_SDK.Images.ImageGenerateParams { + const { model, prompt, numberOfImages, size, modelOptions } = options + + return { + model, + prompt, + n: numberOfImages ?? 1, + size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], + ...modelOptions, + } + } + + protected transformResponse( + model: string, + response: OpenAI_SDK.Images.ImagesResponse, + ): ImageGenerationResult { + const images: Array = (response.data ?? []).map((item) => ({ + b64Json: item.b64_json, + url: item.url, + revisedPrompt: item.revised_prompt, + })) + + return { + id: generateId(this.name), + model, + images, + usage: response.usage + ? 
{ + inputTokens: response.usage.input_tokens, + outputTokens: response.usage.output_tokens, + totalTokens: response.usage.total_tokens, + } + : undefined, + } + } + + protected validatePrompt(options: { prompt: string; model: string }): void { + if (options.prompt.length === 0) { + throw new Error('Prompt cannot be empty.') + } + } + + protected validateImageSize(_model: string, _size: string | undefined): void { + // Default: no size validation — subclasses can override + } + + protected validateNumberOfImages( + _model: string, + numberOfImages: number | undefined, + ): void { + if (numberOfImages === undefined) return + + if (numberOfImages < 1 || numberOfImages > 10) { + throw new Error( + `Number of images must be between 1 and 10. Requested: ${numberOfImages}`, + ) + } + } +} diff --git a/packages/typescript/openai-base/src/adapters/summarize.ts b/packages/typescript/openai-base/src/adapters/summarize.ts new file mode 100644 index 000000000..2fba3cdba --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/summarize.ts @@ -0,0 +1,120 @@ +import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import type { + StreamChunk, + SummarizationOptions, + SummarizationResult, + TextOptions, +} from '@tanstack/ai' + +/** + * Minimal interface for a text adapter that supports chatStream. + * This allows the summarize adapter to work with any OpenAI-compatible + * text adapter without tight coupling to a specific implementation. + */ +export interface ChatStreamCapable { + chatStream: (options: TextOptions) => AsyncIterable +} + +/** + * OpenAI-Compatible Summarize Adapter + * + * A thin wrapper around a text adapter that adds summarization-specific prompting. + * Delegates all API calls to the provided text adapter. + * + * Subclasses or instantiators provide a text adapter (or factory) at construction + * time, allowing any OpenAI-compatible provider to get summarization for free by + * reusing its text adapter. 
+ */ +export class OpenAICompatibleSummarizeAdapter< + TModel extends string, + TProviderOptions extends object = Record, +> extends BaseSummarizeAdapter { + readonly name: string + + private textAdapter: ChatStreamCapable + + constructor( + textAdapter: ChatStreamCapable, + model: TModel, + name: string = 'openai-compatible', + ) { + super({}, model) + this.name = name + this.textAdapter = textAdapter + } + + async summarize(options: SummarizationOptions): Promise { + const systemPrompt = this.buildSummarizationPrompt(options) + + let summary = '' + const id = '' + let model = options.model + let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } + + for await (const chunk of this.textAdapter.chatStream({ + model: options.model as TModel, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + } as TextOptions)) { + if (chunk.type === 'TEXT_MESSAGE_CONTENT') { + if (chunk.content) { + summary = chunk.content + } else { + summary += chunk.delta + } + model = chunk.model || model + } + if (chunk.type === 'RUN_FINISHED') { + if (chunk.usage) { + usage = chunk.usage + } + } + } + + return { id, model, summary, usage } + } + + async *summarizeStream( + options: SummarizationOptions, + ): AsyncIterable { + const systemPrompt = this.buildSummarizationPrompt(options) + + yield* this.textAdapter.chatStream({ + model: options.model as TModel, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + } as TextOptions) + } + + protected buildSummarizationPrompt(options: SummarizationOptions): string { + let prompt = 'You are a professional summarizer. ' + + switch (options.style) { + case 'bullet-points': + prompt += 'Provide a summary in bullet point format. ' + break + case 'paragraph': + prompt += 'Provide a summary in paragraph format. 
' + break + case 'concise': + prompt += 'Provide a very concise summary in 1-2 sentences. ' + break + default: + prompt += 'Provide a clear and concise summary. ' + } + + if (options.focus && options.focus.length > 0) { + prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` + } + + if (options.maxLength) { + prompt += `Keep the summary under ${options.maxLength} tokens. ` + } + + return prompt + } +} diff --git a/packages/typescript/openai-base/src/adapters/transcription.ts b/packages/typescript/openai-base/src/adapters/transcription.ts new file mode 100644 index 000000000..6ae4451e9 --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/transcription.ts @@ -0,0 +1,161 @@ +import { BaseTranscriptionAdapter } from '@tanstack/ai/adapters' +import { generateId } from '@tanstack/ai-utils' +import { createOpenAICompatibleClient } from '../utils/client' +import type { + TranscriptionOptions, + TranscriptionResult, + TranscriptionSegment, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' +import type { OpenAICompatibleClientConfig } from '../types/config' + +/** + * OpenAI-Compatible Transcription (Speech-to-Text) Adapter + * + * A generalized base class for providers that implement OpenAI-compatible audio + * transcription APIs. Providers can extend this class and only need to: + * - Set `baseURL` in the config + * - Lock the generic type parameters to provider-specific types + * - Override audio handling or response mapping methods as needed + * + * All methods that handle audio input or map response formats are `protected` + * so subclasses can override them. 
+ */ +export class OpenAICompatibleTranscriptionAdapter< + TModel extends string, + TProviderOptions extends object = Record, +> extends BaseTranscriptionAdapter { + readonly name: string + + protected client: OpenAI_SDK + + constructor( + config: OpenAICompatibleClientConfig, + model: TModel, + name: string = 'openai-compatible', + ) { + super(config, model) + this.name = name + this.client = createOpenAICompatibleClient(config) + } + + async transcribe( + options: TranscriptionOptions, + ): Promise { + const { model, audio, language, prompt, responseFormat, modelOptions } = + options + + // Convert audio input to File object + const file = this.prepareAudioFile(audio) + + // Build request + const request: OpenAI_SDK.Audio.TranscriptionCreateParams = { + model, + file, + language, + prompt, + response_format: this.mapResponseFormat(responseFormat), + ...modelOptions, + } + + // Call API - use verbose_json to get timestamps when available + const useVerbose = + responseFormat === 'verbose_json' || + (!responseFormat && model !== 'whisper-1') + + if (useVerbose) { + const response = await this.client.audio.transcriptions.create({ + ...request, + response_format: 'verbose_json', + }) + + return { + id: generateId(this.name), + model, + text: response.text, + language: response.language, + duration: response.duration, + segments: response.segments?.map( + (seg): TranscriptionSegment => ({ + id: seg.id, + start: seg.start, + end: seg.end, + text: seg.text, + confidence: seg.avg_logprob ? Math.exp(seg.avg_logprob) : undefined, + }), + ), + words: response.words?.map((w) => ({ + word: w.word, + start: w.start, + end: w.end, + })), + } + } else { + const response = await this.client.audio.transcriptions.create(request) + + return { + id: generateId(this.name), + model, + text: typeof response === 'string' ? 
response : response.text, + language, + } + } + } + + protected prepareAudioFile( + audio: string | File | Blob | ArrayBuffer, + ): File { + // If already a File, return it + if (typeof File !== 'undefined' && audio instanceof File) { + return audio + } + + // If Blob, convert to File + if (typeof Blob !== 'undefined' && audio instanceof Blob) { + return new File([audio], 'audio.mp3', { + type: audio.type || 'audio/mpeg', + }) + } + + // If ArrayBuffer, convert to File + if (audio instanceof ArrayBuffer) { + return new File([audio], 'audio.mp3', { type: 'audio/mpeg' }) + } + + // If base64 string, decode and convert to File + if (typeof audio === 'string') { + // Check if it's a data URL + if (audio.startsWith('data:')) { + const parts = audio.split(',') + const header = parts[0] + const base64Data = parts[1] || '' + const mimeMatch = header?.match(/data:([^;]+)/) + const mimeType = mimeMatch?.[1] || 'audio/mpeg' + const binaryStr = atob(base64Data) + const bytes = new Uint8Array(binaryStr.length) + for (let i = 0; i < binaryStr.length; i++) { + bytes[i] = binaryStr.charCodeAt(i) + } + const extension = mimeType.split('/')[1] || 'mp3' + return new File([bytes], `audio.${extension}`, { type: mimeType }) + } + + // Assume raw base64 + const binaryStr = atob(audio) + const bytes = new Uint8Array(binaryStr.length) + for (let i = 0; i < binaryStr.length; i++) { + bytes[i] = binaryStr.charCodeAt(i) + } + return new File([bytes], 'audio.mp3', { type: 'audio/mpeg' }) + } + + throw new Error('Invalid audio input type') + } + + protected mapResponseFormat( + format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt', + ): OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] { + if (!format) return 'json' + return format as OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] + } +} diff --git a/packages/typescript/openai-base/src/adapters/tts.ts b/packages/typescript/openai-base/src/adapters/tts.ts new file mode 100644 index 000000000..21a03b4f6 --- 
/dev/null +++ b/packages/typescript/openai-base/src/adapters/tts.ts @@ -0,0 +1,109 @@ +import { BaseTTSAdapter } from '@tanstack/ai/adapters' +import { generateId } from '@tanstack/ai-utils' +import { createOpenAICompatibleClient } from '../utils/client' +import type { TTSOptions, TTSResult } from '@tanstack/ai' +import type OpenAI_SDK from 'openai' +import type { OpenAICompatibleClientConfig } from '../types/config' + +/** + * OpenAI-Compatible Text-to-Speech Adapter + * + * A generalized base class for providers that implement OpenAI-compatible TTS APIs. + * Providers can extend this class and only need to: + * - Set `baseURL` in the config + * - Lock the generic type parameters to provider-specific types + * - Override validation methods or request building for provider-specific constraints + * + * All methods that validate inputs or build requests are `protected` so subclasses + * can override them. + */ +export class OpenAICompatibleTTSAdapter< + TModel extends string, + TProviderOptions extends object = Record, +> extends BaseTTSAdapter { + readonly name: string + + protected client: OpenAI_SDK + + constructor( + config: OpenAICompatibleClientConfig, + model: TModel, + name: string = 'openai-compatible', + ) { + super(config, model) + this.name = name + this.client = createOpenAICompatibleClient(config) + } + + async generateSpeech( + options: TTSOptions, + ): Promise { + const { model, text, voice, format, speed, modelOptions } = options + + // Validate inputs + this.validateAudioInput(text) + this.validateSpeed(speed) + this.validateInstructions(model, modelOptions) + + // Build request + const request: OpenAI_SDK.Audio.SpeechCreateParams = { + model, + input: text, + voice: (voice || 'alloy') as OpenAI_SDK.Audio.SpeechCreateParams['voice'], + response_format: format, + speed, + ...modelOptions, + } + + // Call API + const response = await this.client.audio.speech.create(request) + + // Convert response to base64 + const arrayBuffer = await 
response.arrayBuffer() + const base64 = Buffer.from(arrayBuffer).toString('base64') + + const outputFormat = format || 'mp3' + const contentType = this.getContentType(outputFormat) + + return { + id: generateId(this.name), + model, + audio: base64, + format: outputFormat, + contentType, + } + } + + protected validateAudioInput(text: string): void { + if (text.length > 4096) { + throw new Error('Input text exceeds maximum length of 4096 characters.') + } + } + + protected validateSpeed(speed?: number): void { + if (speed !== undefined) { + if (speed < 0.25 || speed > 4.0) { + throw new Error('Speed must be between 0.25 and 4.0.') + } + } + } + + protected validateInstructions( + _model: string, + _modelOptions?: TProviderOptions, + ): void { + // Default: no instructions validation — subclasses can override + } + + protected getContentType(format: string): string { + const contentTypes: Record = { + mp3: 'audio/mpeg', + opus: 'audio/opus', + aac: 'audio/aac', + flac: 'audio/flac', + wav: 'audio/wav', + pcm: 'audio/pcm', + } + return contentTypes[format] || 'audio/mpeg' + } +} diff --git a/packages/typescript/openai-base/src/adapters/video.ts b/packages/typescript/openai-base/src/adapters/video.ts new file mode 100644 index 000000000..8969e773e --- /dev/null +++ b/packages/typescript/openai-base/src/adapters/video.ts @@ -0,0 +1,268 @@ +import { BaseVideoAdapter } from '@tanstack/ai/adapters' +import { createOpenAICompatibleClient } from '../utils/client' +import type { + VideoGenerationOptions, + VideoJobResult, + VideoStatusResult, + VideoUrlResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' +import type { OpenAICompatibleClientConfig } from '../types/config' + +/** + * OpenAI-Compatible Video Generation Adapter + * + * A generalized base class for providers that implement OpenAI-compatible video + * generation APIs. Uses a job/polling architecture for async video generation. 
+ * + * Providers can extend this class and only need to: + * - Set `baseURL` in the config + * - Lock the generic type parameters to provider-specific types + * - Override validation or request building methods as needed + * + * All methods that validate inputs, build requests, or map responses are `protected` + * so subclasses can override them. + * + * @experimental Video generation is an experimental feature and may change. + */ +export class OpenAICompatibleVideoAdapter< + TModel extends string, + TProviderOptions extends object = Record, + TModelProviderOptionsByName extends Record = Record, + TModelSizeByName extends Record = Record, +> extends BaseVideoAdapter< + TModel, + TProviderOptions, + TModelProviderOptionsByName, + TModelSizeByName +> { + readonly name: string + + protected client: OpenAI_SDK + protected clientConfig: OpenAICompatibleClientConfig + + constructor( + config: OpenAICompatibleClientConfig, + model: TModel, + name: string = 'openai-compatible', + ) { + super(config, model) + this.name = name + this.clientConfig = config + this.client = createOpenAICompatibleClient(config) + } + + /** + * Create a new video generation job. + * + * @experimental Video generation is an experimental feature and may change. + */ + async createVideoJob( + options: VideoGenerationOptions, + ): Promise { + const { model, size, duration, modelOptions } = options + + // Validate inputs + this.validateVideoSize(model, size) + const seconds = duration ?? (modelOptions as any)?.seconds + this.validateVideoSeconds(model, seconds) + + // Build request + const request = this.buildRequest(options) + + try { + const client = this.client as any + const response = await client.videos.create(request) + + return { + jobId: response.id, + model, + } + } catch (error: any) { + if (error?.message?.includes('videos') || error?.code === 'invalid_api') { + throw new Error( + `Video generation API is not available. The API may require special access. 
` + + `Original error: ${error.message}`, + ) + } + throw error + } + } + + /** + * Get the current status of a video generation job. + * + * @experimental Video generation is an experimental feature and may change. + */ + async getVideoStatus(jobId: string): Promise { + try { + const client = this.client as any + const response = await client.videos.retrieve(jobId) + + return { + jobId, + status: this.mapStatus(response.status), + progress: response.progress, + error: response.error?.message, + } + } catch (error: any) { + if (error.status === 404) { + return { + jobId, + status: 'failed', + error: 'Job not found', + } + } + throw error + } + } + + /** + * Get the URL to download/view the generated video. + * + * @experimental Video generation is an experimental feature and may change. + */ + async getVideoUrl(jobId: string): Promise { + try { + const client = this.client as any + + let response: any + + if (typeof client.videos?.content === 'function') { + response = await client.videos.content(jobId) + } else if (typeof client.videos?.getContent === 'function') { + response = await client.videos.getContent(jobId) + } else if (typeof client.videos?.download === 'function') { + response = await client.videos.download(jobId) + } else { + // Fallback: check if retrieve returns the URL directly + const videoInfo = await client.videos.retrieve(jobId) + if (videoInfo.url) { + return { + jobId, + url: videoInfo.url, + expiresAt: videoInfo.expires_at + ? 
new Date(videoInfo.expires_at) + : undefined, + } + } + + // Fetch and return a data URL + const baseUrl = + this.clientConfig.baseURL || 'https://api.openai.com/v1' + const apiKey = this.clientConfig.apiKey + + const contentResponse = await fetch( + `${baseUrl}/videos/${jobId}/content`, + { + method: 'GET', + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }, + ) + + if (!contentResponse.ok) { + const contentType = contentResponse.headers.get('content-type') + if (contentType?.includes('application/json')) { + const errorData = await contentResponse.json().catch(() => ({})) + throw new Error( + errorData.error?.message || + `Failed to get video content: ${contentResponse.status}`, + ) + } + throw new Error( + `Failed to get video content: ${contentResponse.status}`, + ) + } + + const videoBlob = await contentResponse.blob() + const buffer = await videoBlob.arrayBuffer() + const base64 = Buffer.from(buffer).toString('base64') + const mimeType = + contentResponse.headers.get('content-type') || 'video/mp4' + + return { + jobId, + url: `data:${mimeType};base64,${base64}`, + expiresAt: undefined, + } + } + + return { + jobId, + url: response.url, + expiresAt: response.expires_at + ? new Date(response.expires_at) + : undefined, + } + } catch (error: any) { + if (error.status === 404) { + throw new Error(`Video job not found: ${jobId}`) + } + if (error.status === 400) { + throw new Error( + `Video is not ready for download. Check status first. Job ID: ${jobId}`, + ) + } + throw error + } + } + + protected buildRequest( + options: VideoGenerationOptions, + ): Record { + const { model, prompt, size, duration, modelOptions } = options + + const request: Record = { + model, + prompt, + } + + if (size) { + request['size'] = size + } else if ((modelOptions as any)?.size) { + request['size'] = (modelOptions as any).size + } + + const seconds = duration ?? 
(modelOptions as any)?.seconds + if (seconds !== undefined) { + request['seconds'] = String(seconds) + } + + return request + } + + protected validateVideoSize(_model: string, _size?: string): void { + // Default: no size validation — subclasses can override + } + + protected validateVideoSeconds( + _model: string, + _seconds?: number | string, + ): void { + // Default: no duration validation — subclasses can override + } + + protected mapStatus( + apiStatus: string, + ): 'pending' | 'processing' | 'completed' | 'failed' { + switch (apiStatus) { + case 'queued': + case 'pending': + return 'pending' + case 'processing': + case 'in_progress': + return 'processing' + case 'completed': + case 'succeeded': + return 'completed' + case 'failed': + case 'error': + case 'cancelled': + return 'failed' + default: + return 'processing' + } + } +} diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts index b63b62802..aa19e2502 100644 --- a/packages/typescript/openai-base/src/index.ts +++ b/packages/typescript/openai-base/src/index.ts @@ -16,3 +16,11 @@ export { convertToolsToResponsesFormat, type ResponsesFunctionTool, } from './adapters/responses-tool-converter' +export { OpenAICompatibleImageAdapter } from './adapters/image' +export { + OpenAICompatibleSummarizeAdapter, + type ChatStreamCapable, +} from './adapters/summarize' +export { OpenAICompatibleTranscriptionAdapter } from './adapters/transcription' +export { OpenAICompatibleTTSAdapter } from './adapters/tts' +export { OpenAICompatibleVideoAdapter } from './adapters/video' From 06d670147a68b1c36bac0b6ba43ec1cc53e62356 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 14:10:18 +0200 Subject: [PATCH 07/15] refactor(ai-openai): delegate to @tanstack/openai-base and @tanstack/ai-utils Migrate ai-openai to extend base classes from openai-base and delegate utility functions to ai-utils, eliminating ~1,800 lines of duplicated code while maintaining zero 
breaking changes. Changes: - utils/client.ts: generateId and getOpenAIApiKeyFromEnv delegate to ai-utils - utils/schema-converter.ts: transformNullsToUndefined and makeOpenAIStructuredOutputCompatible delegate to ai-utils/openai-base - tools/*: all 14 tool files re-export from openai-base - adapters/text.ts: extends OpenAICompatibleResponsesTextAdapter, overrides mapOptionsToRequest for OpenAI-specific tool conversion and validation - adapters/image.ts: extends OpenAICompatibleImageAdapter, overrides validation methods - adapters/summarize.ts: extends OpenAICompatibleSummarizeAdapter - adapters/tts.ts: extends OpenAICompatibleTTSAdapter, overrides validation - adapters/transcription.ts: extends OpenAICompatibleTranscriptionAdapter - adapters/video.ts: extends OpenAICompatibleVideoAdapter, overrides validation and request building - openai-base config.ts: removed explicit baseURL to avoid null incompatibility All 127 existing tests pass, types check clean, build succeeds. --- packages/typescript/ai-openai/package.json | 4 + .../ai-openai/src/adapters/image.ts | 87 +- .../ai-openai/src/adapters/summarize.ts | 106 +-- .../typescript/ai-openai/src/adapters/text.ts | 778 +----------------- .../ai-openai/src/adapters/transcription.ts | 142 +--- .../typescript/ai-openai/src/adapters/tts.ts | 94 +-- .../ai-openai/src/adapters/video.ts | 258 +----- .../ai-openai/src/tools/apply-patch-tool.ts | 31 +- .../src/tools/code-interpreter-tool.ts | 36 +- .../ai-openai/src/tools/computer-use-tool.ts | 36 +- .../ai-openai/src/tools/custom-tool.ts | 35 +- .../ai-openai/src/tools/file-search-tool.ts | 47 +- .../ai-openai/src/tools/function-tool.ts | 46 +- .../src/tools/image-generation-tool.ts | 44 +- .../typescript/ai-openai/src/tools/index.ts | 27 +- .../ai-openai/src/tools/local-shell-tool.ts | 31 +- .../ai-openai/src/tools/mcp-tool.ts | 47 +- .../ai-openai/src/tools/shell-tool.ts | 29 +- .../ai-openai/src/tools/tool-choice.ts | 32 +- .../ai-openai/src/tools/tool-converter.ts | 73 +- 
.../src/tools/web-search-preview-tool.ts | 34 +- .../ai-openai/src/tools/web-search-tool.ts | 28 +- .../typescript/ai-openai/src/utils/client.ts | 32 +- .../ai-openai/src/utils/schema-converter.ts | 120 +-- .../openai-base/src/types/config.ts | 1 - pnpm-lock.yaml | 6 + 26 files changed, 181 insertions(+), 2023 deletions(-) diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json index 4c4b7ce9c..eb5d61006 100644 --- a/packages/typescript/ai-openai/package.json +++ b/packages/typescript/ai-openai/package.json @@ -40,6 +40,8 @@ "adapter" ], "dependencies": { + "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "openai": "^6.9.1" }, "peerDependencies": { @@ -50,6 +52,8 @@ "devDependencies": { "@tanstack/ai": "workspace:*", "@tanstack/ai-client": "workspace:*", + "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7", "zod": "^4.2.0" diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts index 585e8a72f..86274da76 100644 --- a/packages/typescript/ai-openai/src/adapters/image.ts +++ b/packages/typescript/ai-openai/src/adapters/image.ts @@ -1,9 +1,5 @@ -import { BaseImageAdapter } from '@tanstack/ai/adapters' -import { - createOpenAIClient, - generateId, - getOpenAIApiKeyFromEnv, -} from '../utils/client' +import { OpenAICompatibleImageAdapter } from '@tanstack/openai-base' +import { getOpenAIApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import { validateImageSize, validateNumberOfImages, @@ -15,12 +11,6 @@ import type { OpenAIImageModelSizeByName, OpenAIImageProviderOptions, } from '../image/image-provider-options' -import type { - GeneratedImage, - ImageGenerationOptions, - ImageGenerationResult, -} from '@tanstack/ai' -import type OpenAI_SDK from 'openai' import type { OpenAIClientConfig } from '../utils/client' /** @@ -41,7 +31,7 @@ export 
interface OpenAIImageConfig extends OpenAIClientConfig {} */ export class OpenAIImageAdapter< TModel extends OpenAIImageModel, -> extends BaseImageAdapter< +> extends OpenAICompatibleImageAdapter< TModel, OpenAIImageProviderOptions, OpenAIImageModelProviderOptionsByName, @@ -50,70 +40,29 @@ export class OpenAIImageAdapter< readonly kind = 'image' as const readonly name = 'openai' as const - private client: OpenAI_SDK - constructor(config: OpenAIImageConfig, model: TModel) { - super({}, model) - this.client = createOpenAIClient(config) + super(toCompatibleConfig(config), model, 'openai') } - async generateImages( - options: ImageGenerationOptions, - ): Promise { - const { model, prompt, numberOfImages, size } = options - - // Validate inputs - validatePrompt({ prompt, model }) - validateImageSize(model, size) - validateNumberOfImages(model, numberOfImages) - - // Build request based on model type - const request = this.buildRequest(options) - - const response = await this.client.images.generate({ - ...request, - stream: false, - }) - - return this.transformResponse(model, response) + protected override validatePrompt(options: { + prompt: string + model: string + }): void { + validatePrompt(options) } - private buildRequest( - options: ImageGenerationOptions, - ): OpenAI_SDK.Images.ImageGenerateParams { - const { model, prompt, numberOfImages, size, modelOptions } = options - - return { - model, - prompt, - n: numberOfImages ?? 1, - size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], - ...modelOptions, - } + protected override validateImageSize( + model: string, + size: string | undefined, + ): void { + validateImageSize(model, size) } - private transformResponse( + protected override validateNumberOfImages( model: string, - response: OpenAI_SDK.Images.ImagesResponse, - ): ImageGenerationResult { - const images: Array = (response.data ?? 
[]).map((item) => ({ - b64Json: item.b64_json, - url: item.url, - revisedPrompt: item.revised_prompt, - })) - - return { - id: generateId(this.name), - model, - images, - usage: response.usage - ? { - inputTokens: response.usage.input_tokens, - outputTokens: response.usage.output_tokens, - totalTokens: response.usage.total_tokens, - } - : undefined, - } + numberOfImages: number | undefined, + ): void { + validateNumberOfImages(model, numberOfImages) } } diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts index 6db5d874e..d64c5af0e 100644 --- a/packages/typescript/ai-openai/src/adapters/summarize.ts +++ b/packages/typescript/ai-openai/src/adapters/summarize.ts @@ -1,12 +1,8 @@ -import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-base' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { OpenAITextAdapter } from './text' +import type { ChatStreamCapable } from '@tanstack/openai-base' import type { OpenAIChatModel } from '../model-meta' -import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' import type { OpenAIClientConfig } from '../utils/client' /** @@ -32,94 +28,24 @@ export interface OpenAISummarizeProviderOptions { */ export class OpenAISummarizeAdapter< TModel extends OpenAIChatModel, -> extends BaseSummarizeAdapter { +> extends OpenAICompatibleSummarizeAdapter< + TModel, + OpenAISummarizeProviderOptions +> { readonly kind = 'summarize' as const readonly name = 'openai' as const - private textAdapter: OpenAITextAdapter - constructor(config: OpenAISummarizeConfig, model: TModel) { - super({}, model) - this.textAdapter = new OpenAITextAdapter(config, model) - } - - async summarize(options: SummarizationOptions): Promise { - const systemPrompt = this.buildSummarizationPrompt(options) - - // Use the text adapter's streaming and collect the result - let summary 
= '' - const id = '' - let model = options.model - let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - })) { - // AG-UI TEXT_MESSAGE_CONTENT event - if (chunk.type === 'TEXT_MESSAGE_CONTENT') { - if (chunk.content) { - summary = chunk.content - } else { - summary += chunk.delta - } - model = chunk.model || model - } - // AG-UI RUN_FINISHED event - if (chunk.type === 'RUN_FINISHED') { - if (chunk.usage) { - usage = chunk.usage - } - } - } - - return { id, model, summary, usage } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const systemPrompt = this.buildSummarizationPrompt(options) - - // Delegate directly to the text adapter's streaming - yield* this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - }) - } - - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. ' - break - default: - prompt += 'Provide a clear and concise summary. ' - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` - } - - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} tokens. 
` - } - - return prompt + // The text adapter accepts richer provider options than the summarize adapter needs, + // but we only pass basic options (model, messages, systemPrompts, etc.) at call time. + super( + new OpenAITextAdapter( + config, + model, + ) as unknown as ChatStreamCapable, + model, + 'openai', + ) } } diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index 1747ce4ec..febb13003 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -1,42 +1,20 @@ -import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/openai-base' import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' -import { - createOpenAIClient, - generateId, - getOpenAIApiKeyFromEnv, -} from '../utils/client' -import { - makeOpenAIStructuredOutputCompatible, - transformNullsToUndefined, -} from '../utils/schema-converter' +import { getOpenAIApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import type { OPENAI_CHAT_MODELS, OpenAIChatModel, OpenAIChatModelProviderOptionsByName, OpenAIModelInputModalitiesByName, } from '../model-meta' -import type { - StructuredOutputOptions, - StructuredOutputResult, -} from '@tanstack/ai/adapters' import type OpenAI_SDK from 'openai' -import type { Responses } from 'openai/resources' -import type { - ContentPart, - ModelMessage, - StreamChunk, - TextOptions, -} from '@tanstack/ai' +import type { TextOptions } from '@tanstack/ai' import type { ExternalTextProviderOptions, InternalTextProviderOptions, } from '../text/text-provider-options' -import type { - OpenAIAudioMetadata, - OpenAIImageMetadata, - OpenAIMessageMetadataByModality, -} from '../message-types' +import type { OpenAIMessageMetadataByModality } from '../message-types' import type { OpenAIClientConfig } from 
'../utils/client' /** @@ -83,7 +61,7 @@ type ResolveInputModalities = */ export class OpenAITextAdapter< TModel extends OpenAIChatModel, -> extends BaseTextAdapter< +> extends OpenAICompatibleResponsesTextAdapter< TModel, ResolveProviderOptions, ResolveInputModalities, @@ -92,567 +70,19 @@ export class OpenAITextAdapter< readonly kind = 'text' as const readonly name = 'openai' as const - private client: OpenAI_SDK - constructor(config: OpenAITextConfig, model: TModel) { - super({}, model) - this.client = createOpenAIClient(config) - } - - async *chatStream( - options: TextOptions>, - ): AsyncIterable { - // Track tool call metadata by unique ID - // OpenAI streams tool calls with deltas - first chunk has ID/name, subsequent chunks only have args - // We assign our own indices as we encounter unique tool call IDs - const toolCallMetadata = new Map< - string, - { index: number; name: string; started: boolean } - >() - const requestArguments = this.mapTextOptionsToOpenAI(options) - - try { - const response = await this.client.responses.create( - { - ...requestArguments, - stream: true, - }, - { - headers: options.request?.headers, - signal: options.request?.signal, - }, - ) - - // Chat Completions API uses SSE format - iterate directly - yield* this.processOpenAIStreamChunks( - response, - toolCallMetadata, - options, - () => generateId(this.name), - ) - } catch (error: unknown) { - const err = error as Error - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) - throw error - } + super(toCompatibleConfig(config), model, 'openai') } /** - * Generate structured output using OpenAI's native JSON Schema response format. - * Uses stream: false to get the complete response in one call. 
- * - * OpenAI has strict requirements for structured output: - * - All properties must be in the `required` array - * - Optional fields should have null added to their type union - * - additionalProperties must be false for all objects - * - * The outputSchema is already JSON Schema (converted in the ai layer). - * We apply OpenAI-specific transformations for structured output compatibility. + * Maps common options to OpenAI-specific format. + * Overrides the base class to use OpenAI's full tool converter + * (supporting special tool types like file_search, web_search, etc.) + * and to apply OpenAI-specific provider option validation. */ - async structuredOutput( - options: StructuredOutputOptions>, - ): Promise> { - const { chatOptions, outputSchema } = options - const requestArguments = this.mapTextOptionsToOpenAI(chatOptions) - - // Apply OpenAI-specific transformations for structured output compatibility - const jsonSchema = makeOpenAIStructuredOutputCompatible( - outputSchema, - outputSchema.required || [], - ) - - try { - const response = await this.client.responses.create( - { - ...requestArguments, - stream: false, - // Configure structured output via text.format - text: { - format: { - type: 'json_schema', - name: 'structured_output', - schema: jsonSchema, - strict: true, - }, - }, - }, - { - headers: chatOptions.request?.headers, - signal: chatOptions.request?.signal, - }, - ) - - // Extract text content from the response - const rawText = this.extractTextFromResponse(response) - - // Parse the JSON response - let parsed: unknown - try { - parsed = JSON.parse(rawText) - } catch { - throw new Error( - `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' 
: ''}`, - ) - } - - // Transform null values to undefined to match original Zod schema expectations - // OpenAI returns null for optional fields we made nullable in the schema - const transformed = transformNullsToUndefined(parsed) - - return { - data: transformed, - rawText, - } - } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) - throw error - } - } - - /** - * Extract text content from a non-streaming response - */ - private extractTextFromResponse( - response: OpenAI_SDK.Responses.Response, - ): string { - let textContent = '' - - for (const item of response.output) { - if (item.type === 'message') { - for (const part of item.content) { - if (part.type === 'output_text') { - textContent += part.text - } - } - } - } - - return textContent - } - - private async *processOpenAIStreamChunks( - stream: AsyncIterable, - toolCallMetadata: Map< - string, - { index: number; name: string; started: boolean } - >, + protected override mapOptionsToRequest( options: TextOptions, - genId: () => string, - ): AsyncIterable { - let accumulatedContent = '' - let accumulatedReasoning = '' - const timestamp = Date.now() - let chunkCount = 0 - - // Track if we've been streaming deltas to avoid duplicating content from done events - let hasStreamedContentDeltas = false - let hasStreamedReasoningDeltas = false - - // Preserve response metadata across events - let model: string = options.model - - // AG-UI lifecycle tracking - const runId = genId() - const messageId = genId() - let stepId: string | null = null - let hasEmittedRunStarted = false - let hasEmittedTextMessageStart = false - let hasEmittedStepStarted = false - - try { - for await (const chunk of stream) { - chunkCount++ - - // Emit RUN_STARTED on first chunk - if (!hasEmittedRunStarted) { - hasEmittedRunStarted = true - yield { - type: 'RUN_STARTED', - runId, - model: model || options.model, - 
timestamp, - } - } - - const handleContentPart = ( - contentPart: - | OpenAI_SDK.Responses.ResponseOutputText - | OpenAI_SDK.Responses.ResponseOutputRefusal - | OpenAI_SDK.Responses.ResponseContentPartAddedEvent.ReasoningText, - ): StreamChunk => { - if (contentPart.type === 'output_text') { - accumulatedContent += contentPart.text - return { - type: 'TEXT_MESSAGE_CONTENT', - messageId, - model: model || options.model, - timestamp, - delta: contentPart.text, - content: accumulatedContent, - } - } - - if (contentPart.type === 'reasoning_text') { - accumulatedReasoning += contentPart.text - return { - type: 'STEP_FINISHED', - stepId: stepId || genId(), - model: model || options.model, - timestamp, - delta: contentPart.text, - content: accumulatedReasoning, - } - } - return { - type: 'RUN_ERROR', - runId, - model: model || options.model, - timestamp, - error: { - message: contentPart.refusal, - }, - } - } - // handle general response events - if ( - chunk.type === 'response.created' || - chunk.type === 'response.incomplete' || - chunk.type === 'response.failed' - ) { - model = chunk.response.model - // Reset streaming flags for new response - hasStreamedContentDeltas = false - hasStreamedReasoningDeltas = false - hasEmittedTextMessageStart = false - hasEmittedStepStarted = false - accumulatedContent = '' - accumulatedReasoning = '' - if (chunk.response.error) { - yield { - type: 'RUN_ERROR', - runId, - model: chunk.response.model, - timestamp, - error: chunk.response.error, - } - } - if (chunk.response.incomplete_details) { - yield { - type: 'RUN_ERROR', - runId, - model: chunk.response.model, - timestamp, - error: { - message: chunk.response.incomplete_details.reason ?? 
'', - }, - } - } - } - // Handle output text deltas (token-by-token streaming) - // response.output_text.delta provides incremental text updates - if (chunk.type === 'response.output_text.delta' && chunk.delta) { - // Delta can be an array of strings or a single string - const textDelta = Array.isArray(chunk.delta) - ? chunk.delta.join('') - : typeof chunk.delta === 'string' - ? chunk.delta - : '' - - if (textDelta) { - // Emit TEXT_MESSAGE_START on first text content - if (!hasEmittedTextMessageStart) { - hasEmittedTextMessageStart = true - yield { - type: 'TEXT_MESSAGE_START', - messageId, - model: model || options.model, - timestamp, - role: 'assistant', - } - } - - accumulatedContent += textDelta - hasStreamedContentDeltas = true - yield { - type: 'TEXT_MESSAGE_CONTENT', - messageId, - model: model || options.model, - timestamp, - delta: textDelta, - content: accumulatedContent, - } - } - } - - // Handle reasoning deltas (token-by-token thinking/reasoning streaming) - // response.reasoning_text.delta provides incremental reasoning updates - if (chunk.type === 'response.reasoning_text.delta' && chunk.delta) { - // Delta can be an array of strings or a single string - const reasoningDelta = Array.isArray(chunk.delta) - ? chunk.delta.join('') - : typeof chunk.delta === 'string' - ? 
chunk.delta - : '' - - if (reasoningDelta) { - // Emit STEP_STARTED on first reasoning content - if (!hasEmittedStepStarted) { - hasEmittedStepStarted = true - stepId = genId() - yield { - type: 'STEP_STARTED', - stepId, - model: model || options.model, - timestamp, - stepType: 'thinking', - } - } - - accumulatedReasoning += reasoningDelta - hasStreamedReasoningDeltas = true - yield { - type: 'STEP_FINISHED', - stepId: stepId || genId(), - model: model || options.model, - timestamp, - delta: reasoningDelta, - content: accumulatedReasoning, - } - } - } - - // Handle reasoning summary deltas (when using reasoning.summary option) - // response.reasoning_summary_text.delta provides incremental summary updates - if ( - chunk.type === 'response.reasoning_summary_text.delta' && - chunk.delta - ) { - const summaryDelta = - typeof chunk.delta === 'string' ? chunk.delta : '' - - if (summaryDelta) { - // Emit STEP_STARTED on first reasoning content - if (!hasEmittedStepStarted) { - hasEmittedStepStarted = true - stepId = genId() - yield { - type: 'STEP_STARTED', - stepId, - model: model || options.model, - timestamp, - stepType: 'thinking', - } - } - - accumulatedReasoning += summaryDelta - hasStreamedReasoningDeltas = true - yield { - type: 'STEP_FINISHED', - stepId: stepId || genId(), - model: model || options.model, - timestamp, - delta: summaryDelta, - content: accumulatedReasoning, - } - } - } - - // handle content_part added events for text, reasoning and refusals - if (chunk.type === 'response.content_part.added') { - const contentPart = chunk.part - // Emit TEXT_MESSAGE_START if this is text content - if ( - contentPart.type === 'output_text' && - !hasEmittedTextMessageStart - ) { - hasEmittedTextMessageStart = true - yield { - type: 'TEXT_MESSAGE_START', - messageId, - model: model || options.model, - timestamp, - role: 'assistant', - } - } - // Emit STEP_STARTED if this is reasoning content - if (contentPart.type === 'reasoning_text' && !hasEmittedStepStarted) { - 
hasEmittedStepStarted = true - stepId = genId() - yield { - type: 'STEP_STARTED', - stepId, - model: model || options.model, - timestamp, - stepType: 'thinking', - } - } - yield handleContentPart(contentPart) - } - - if (chunk.type === 'response.content_part.done') { - const contentPart = chunk.part - - // Skip emitting chunks for content parts that we've already streamed via deltas - // The done event is just a completion marker, not new content - if (contentPart.type === 'output_text' && hasStreamedContentDeltas) { - // Content already accumulated from deltas, skip - continue - } - if ( - contentPart.type === 'reasoning_text' && - hasStreamedReasoningDeltas - ) { - // Reasoning already accumulated from deltas, skip - continue - } - - // Only emit if we haven't been streaming deltas (e.g., for non-streaming responses) - yield handleContentPart(contentPart) - } - - // handle output_item.added to capture function call metadata (name) - if (chunk.type === 'response.output_item.added') { - const item = chunk.item - if (item.type === 'function_call' && item.id) { - // Store the function name for later use - if (!toolCallMetadata.has(item.id)) { - toolCallMetadata.set(item.id, { - index: chunk.output_index, - name: item.name || '', - started: false, - }) - } - // Emit TOOL_CALL_START - yield { - type: 'TOOL_CALL_START', - toolCallId: item.id, - toolName: item.name || '', - model: model || options.model, - timestamp, - index: chunk.output_index, - } - toolCallMetadata.get(item.id)!.started = true - } - } - - // Handle function call arguments delta (streaming) - if ( - chunk.type === 'response.function_call_arguments.delta' && - chunk.delta - ) { - const metadata = toolCallMetadata.get(chunk.item_id) - yield { - type: 'TOOL_CALL_ARGS', - toolCallId: chunk.item_id, - model: model || options.model, - timestamp, - delta: chunk.delta, - args: metadata ? 
undefined : chunk.delta, // We don't accumulate here, let caller handle it - } - } - - if (chunk.type === 'response.function_call_arguments.done') { - const { item_id } = chunk - - // Get the function name from metadata (captured in output_item.added) - const metadata = toolCallMetadata.get(item_id) - const name = metadata?.name || '' - - // Parse arguments - let parsedInput: unknown = {} - try { - parsedInput = chunk.arguments ? JSON.parse(chunk.arguments) : {} - } catch { - parsedInput = {} - } - - yield { - type: 'TOOL_CALL_END', - toolCallId: item_id, - toolName: name, - model: model || options.model, - timestamp, - input: parsedInput, - } - } - - if (chunk.type === 'response.completed') { - // Emit TEXT_MESSAGE_END if we had text content - if (hasEmittedTextMessageStart) { - yield { - type: 'TEXT_MESSAGE_END', - messageId, - model: model || options.model, - timestamp, - } - } - - // Determine finish reason based on output - // If there are function_call items in the output, it's a tool_calls finish - const hasFunctionCalls = chunk.response.output.some( - (item: unknown) => - (item as { type: string }).type === 'function_call', - ) - - yield { - type: 'RUN_FINISHED', - runId, - model: model || options.model, - timestamp, - usage: { - promptTokens: chunk.response.usage?.input_tokens || 0, - completionTokens: chunk.response.usage?.output_tokens || 0, - totalTokens: chunk.response.usage?.total_tokens || 0, - }, - finishReason: hasFunctionCalls ? 'tool_calls' : 'stop', - } - } - - if (chunk.type === 'error') { - yield { - type: 'RUN_ERROR', - runId, - model: model || options.model, - timestamp, - error: { - message: chunk.message, - code: chunk.code ?? undefined, - }, - } - } - } - } catch (error: unknown) { - const err = error as Error & { code?: string } - console.log( - '[OpenAI Adapter] Stream ended with error. 
Event type summary:', - { - totalChunks: chunkCount, - error: err.message, - }, - ) - yield { - type: 'RUN_ERROR', - runId, - model: options.model, - timestamp, - error: { - message: err.message || 'Unknown error occurred', - code: err.code, - }, - } - } - } - - /** - * Maps common options to OpenAI-specific format - * Handles translation of normalized options to OpenAI's API format - */ - private mapTextOptionsToOpenAI(options: TextOptions) { + ): Omit { const modelOptions = options.modelOptions as | Omit< InternalTextProviderOptions, @@ -694,190 +124,6 @@ export class OpenAITextAdapter< return requestParams } - - private convertMessagesToInput( - messages: Array, - ): Responses.ResponseInput { - const result: Responses.ResponseInput = [] - - for (const message of messages) { - // Handle tool messages - convert to FunctionToolCallOutput - if (message.role === 'tool') { - result.push({ - type: 'function_call_output', - call_id: message.toolCallId || '', - output: - typeof message.content === 'string' - ? message.content - : JSON.stringify(message.content), - }) - continue - } - - // Handle assistant messages - if (message.role === 'assistant') { - // If the assistant message has tool calls, add them as FunctionToolCall objects - // OpenAI Responses API expects arguments as a string (JSON string) - if (message.toolCalls && message.toolCalls.length > 0) { - for (const toolCall of message.toolCalls) { - // Keep arguments as string for Responses API - // Our internal format stores arguments as a JSON string, which is what API expects - const argumentsString = - typeof toolCall.function.arguments === 'string' - ? 
toolCall.function.arguments - : JSON.stringify(toolCall.function.arguments) - - result.push({ - type: 'function_call', - call_id: toolCall.id, - name: toolCall.function.name, - arguments: argumentsString, - }) - } - } - - // Add the assistant's text message if there is content - if (message.content) { - // Assistant messages are typically text-only - const contentStr = this.extractTextContent(message.content) - if (contentStr) { - result.push({ - type: 'message', - role: 'assistant', - content: contentStr, - }) - } - } - - continue - } - - // Handle user messages (default case) - support multimodal content - const contentParts = this.normalizeContent(message.content) - const openAIContent: Array = [] - - for (const part of contentParts) { - openAIContent.push( - this.convertContentPartToOpenAI( - part as ContentPart< - unknown, - OpenAIImageMetadata, - OpenAIAudioMetadata, - unknown, - unknown - >, - ), - ) - } - - // If no content parts, add empty text - if (openAIContent.length === 0) { - openAIContent.push({ type: 'input_text', text: '' }) - } - - result.push({ - type: 'message', - role: 'user', - content: openAIContent, - }) - } - - return result - } - - /** - * Converts a ContentPart to OpenAI input content item. - * Handles text, image, and audio content parts. - */ - private convertContentPartToOpenAI( - part: ContentPart< - unknown, - OpenAIImageMetadata, - OpenAIAudioMetadata, - unknown, - unknown - >, - ): Responses.ResponseInputContent { - switch (part.type) { - case 'text': - return { - type: 'input_text', - text: part.content, - } - case 'image': { - const imageMetadata = part.metadata - if (part.source.type === 'url') { - return { - type: 'input_image', - image_url: part.source.value, - detail: imageMetadata?.detail || 'auto', - } - } - // For base64 data, construct a data URI using the mimeType from source - const imageValue = part.source.value - const imageUrl = imageValue.startsWith('data:') - ? 
imageValue - : `data:${part.source.mimeType};base64,${imageValue}` - return { - type: 'input_image', - image_url: imageUrl, - detail: imageMetadata?.detail || 'auto', - } - } - case 'audio': { - if (part.source.type === 'url') { - // OpenAI may support audio URLs in the future - // For now, treat as data URI - return { - type: 'input_file', - file_url: part.source.value, - } - } - return { - type: 'input_file', - file_data: part.source.value, - } - } - - default: - throw new Error(`Unsupported content part type: ${part.type}`) - } - } - - /** - * Normalizes message content to an array of ContentPart. - * Handles backward compatibility with string content. - */ - private normalizeContent( - content: string | null | Array, - ): Array { - if (content === null) { - return [] - } - if (typeof content === 'string') { - return [{ type: 'text', content: content }] - } - return content - } - - /** - * Extracts text content from a content value that may be string, null, or ContentPart array. - */ - private extractTextContent( - content: string | null | Array, - ): string { - if (content === null) { - return '' - } - if (typeof content === 'string') { - return content - } - // It's an array of ContentPart - return content - .filter((p) => p.type === 'text') - .map((p) => p.content) - .join('') - } } /** diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index 796bc0b29..65b885ae2 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ -1,17 +1,7 @@ -import { BaseTranscriptionAdapter } from '@tanstack/ai/adapters' -import { - createOpenAIClient, - generateId, - getOpenAIApiKeyFromEnv, -} from '../utils/client' +import { OpenAICompatibleTranscriptionAdapter } from '@tanstack/openai-base' +import { getOpenAIApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import type { OpenAITranscriptionModel } from 
'../model-meta' import type { OpenAITranscriptionProviderOptions } from '../audio/transcription-provider-options' -import type { - TranscriptionOptions, - TranscriptionResult, - TranscriptionSegment, -} from '@tanstack/ai' -import type OpenAI_SDK from 'openai' import type { OpenAIClientConfig } from '../utils/client' /** @@ -34,132 +24,14 @@ export interface OpenAITranscriptionConfig extends OpenAIClientConfig {} */ export class OpenAITranscriptionAdapter< TModel extends OpenAITranscriptionModel, -> extends BaseTranscriptionAdapter { +> extends OpenAICompatibleTranscriptionAdapter< + TModel, + OpenAITranscriptionProviderOptions +> { readonly name = 'openai' as const - private client: OpenAI_SDK - constructor(config: OpenAITranscriptionConfig, model: TModel) { - super(config, model) - this.client = createOpenAIClient(config) - } - - async transcribe( - options: TranscriptionOptions, - ): Promise { - const { model, audio, language, prompt, responseFormat, modelOptions } = - options - - // Convert audio input to File object - const file = this.prepareAudioFile(audio) - - // Build request - const request: OpenAI_SDK.Audio.TranscriptionCreateParams = { - model, - file, - language, - prompt, - response_format: this.mapResponseFormat(responseFormat), - ...modelOptions, - } - - // Call OpenAI API - use verbose_json to get timestamps when available - const useVerbose = - responseFormat === 'verbose_json' || - (!responseFormat && model !== 'whisper-1') - - if (useVerbose) { - const response = await this.client.audio.transcriptions.create({ - ...request, - response_format: 'verbose_json', - }) - - return { - id: generateId(this.name), - model, - text: response.text, - language: response.language, - duration: response.duration, - segments: response.segments?.map( - (seg): TranscriptionSegment => ({ - id: seg.id, - start: seg.start, - end: seg.end, - text: seg.text, - confidence: seg.avg_logprob ? 
Math.exp(seg.avg_logprob) : undefined, - }), - ), - words: response.words?.map((w) => ({ - word: w.word, - start: w.start, - end: w.end, - })), - } - } else { - const response = await this.client.audio.transcriptions.create(request) - - return { - id: generateId(this.name), - model, - text: typeof response === 'string' ? response : response.text, - language, - } - } - } - - private prepareAudioFile(audio: string | File | Blob | ArrayBuffer): File { - // If already a File, return it - if (typeof File !== 'undefined' && audio instanceof File) { - return audio - } - - // If Blob, convert to File - if (typeof Blob !== 'undefined' && audio instanceof Blob) { - return new File([audio], 'audio.mp3', { - type: audio.type || 'audio/mpeg', - }) - } - - // If ArrayBuffer, convert to File - if (audio instanceof ArrayBuffer) { - return new File([audio], 'audio.mp3', { type: 'audio/mpeg' }) - } - - // If base64 string, decode and convert to File - if (typeof audio === 'string') { - // Check if it's a data URL - if (audio.startsWith('data:')) { - const parts = audio.split(',') - const header = parts[0] - const base64Data = parts[1] || '' - const mimeMatch = header?.match(/data:([^;]+)/) - const mimeType = mimeMatch?.[1] || 'audio/mpeg' - const binaryStr = atob(base64Data) - const bytes = new Uint8Array(binaryStr.length) - for (let i = 0; i < binaryStr.length; i++) { - bytes[i] = binaryStr.charCodeAt(i) - } - const extension = mimeType.split('/')[1] || 'mp3' - return new File([bytes], `audio.${extension}`, { type: mimeType }) - } - - // Assume raw base64 - const binaryStr = atob(audio) - const bytes = new Uint8Array(binaryStr.length) - for (let i = 0; i < binaryStr.length; i++) { - bytes[i] = binaryStr.charCodeAt(i) - } - return new File([bytes], 'audio.mp3', { type: 'audio/mpeg' }) - } - - throw new Error('Invalid audio input type') - } - - private mapResponseFormat( - format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt', - ): 
OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] { - if (!format) return 'json' - return format as OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] + super(toCompatibleConfig(config), model, 'openai') } } diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts index 2f34e50fa..c7843e416 100644 --- a/packages/typescript/ai-openai/src/adapters/tts.ts +++ b/packages/typescript/ai-openai/src/adapters/tts.ts @@ -1,22 +1,12 @@ -import { BaseTTSAdapter } from '@tanstack/ai/adapters' -import { - createOpenAIClient, - generateId, - getOpenAIApiKeyFromEnv, -} from '../utils/client' +import { OpenAICompatibleTTSAdapter } from '@tanstack/openai-base' +import { getOpenAIApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import { validateAudioInput, validateInstructions, validateSpeed, } from '../audio/audio-provider-options' import type { OpenAITTSModel } from '../model-meta' -import type { - OpenAITTSFormat, - OpenAITTSProviderOptions, - OpenAITTSVoice, -} from '../audio/tts-provider-options' -import type { TTSOptions, TTSResult } from '@tanstack/ai' -import type OpenAI_SDK from 'openai' +import type { OpenAITTSProviderOptions } from '../audio/tts-provider-options' import type { OpenAIClientConfig } from '../utils/client' /** @@ -37,74 +27,36 @@ export interface OpenAITTSConfig extends OpenAIClientConfig {} */ export class OpenAITTSAdapter< TModel extends OpenAITTSModel, -> extends BaseTTSAdapter { +> extends OpenAICompatibleTTSAdapter { readonly name = 'openai' as const - private client: OpenAI_SDK - constructor(config: OpenAITTSConfig, model: TModel) { - super(config, model) - this.client = createOpenAIClient(config) + super(toCompatibleConfig(config), model, 'openai') } - async generateSpeech( - options: TTSOptions, - ): Promise { - const { model, text, voice, format, speed, modelOptions } = options - - // Validate inputs using existing validators - const audioOptions = { - input: 
text, - model, - voice: voice as OpenAITTSVoice, - speed, - response_format: format as OpenAITTSFormat, - ...modelOptions, - } - - validateAudioInput(audioOptions) - validateSpeed(audioOptions) - validateInstructions(audioOptions) - - // Build request - const request: OpenAI_SDK.Audio.SpeechCreateParams = { - model, - input: text, - voice: voice || 'alloy', - response_format: format, - speed, - ...modelOptions, - } - - // Call OpenAI API - const response = await this.client.audio.speech.create(request) - - // Convert response to base64 - const arrayBuffer = await response.arrayBuffer() - const base64 = Buffer.from(arrayBuffer).toString('base64') - - const outputFormat = format || 'mp3' - const contentType = this.getContentType(outputFormat) + protected override validateAudioInput(text: string): void { + // Delegate to OpenAI-specific validation that also validates model/voice/format + validateAudioInput({ input: text, model: this.model, voice: 'alloy' }) + } - return { - id: generateId(this.name), - model, - audio: base64, - format: outputFormat, - contentType, + protected override validateSpeed(speed?: number): void { + if (speed !== undefined) { + validateSpeed({ speed, model: this.model, input: '', voice: 'alloy' }) } } - private getContentType(format: string): string { - const contentTypes: Record = { - mp3: 'audio/mpeg', - opus: 'audio/opus', - aac: 'audio/aac', - flac: 'audio/flac', - wav: 'audio/wav', - pcm: 'audio/pcm', + protected override validateInstructions( + model: string, + modelOptions?: OpenAITTSProviderOptions, + ): void { + if (modelOptions) { + validateInstructions({ + ...modelOptions, + model, + input: '', + voice: 'alloy', + }) } - return contentTypes[format] || 'audio/mpeg' } } diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts index 1f882d16d..67c37a192 100644 --- a/packages/typescript/ai-openai/src/adapters/video.ts +++ b/packages/typescript/ai-openai/src/adapters/video.ts 
@@ -1,5 +1,5 @@ -import { BaseVideoAdapter } from '@tanstack/ai/adapters' -import { createOpenAIClient, getOpenAIApiKeyFromEnv } from '../utils/client' +import { OpenAICompatibleVideoAdapter } from '@tanstack/openai-base' +import { getOpenAIApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import { toApiSeconds, validateVideoSeconds, @@ -12,12 +12,7 @@ import type { OpenAIVideoModelSizeByName, OpenAIVideoProviderOptions, } from '../video/video-provider-options' -import type { - VideoGenerationOptions, - VideoJobResult, - VideoStatusResult, - VideoUrlResult, -} from '@tanstack/ai' +import type { VideoGenerationOptions } from '@tanstack/ai' import type OpenAI_SDK from 'openai' import type { OpenAIClientConfig } from '../utils/client' @@ -44,7 +39,7 @@ export interface OpenAIVideoConfig extends OpenAIClientConfig {} */ export class OpenAIVideoAdapter< TModel extends OpenAIVideoModel, -> extends BaseVideoAdapter< +> extends OpenAICompatibleVideoAdapter< TModel, OpenAIVideoProviderOptions, OpenAIVideoModelProviderOptionsByName, @@ -52,229 +47,22 @@ export class OpenAIVideoAdapter< > { readonly name = 'openai' as const - private client: OpenAI_SDK - constructor(config: OpenAIVideoConfig, model: TModel) { - super(config, model) - this.client = createOpenAIClient(config) + super(toCompatibleConfig(config), model, 'openai') } - /** - * Create a new video generation job. - * - * API: POST /v1/videos - * Docs: https://platform.openai.com/docs/api-reference/videos/create - * - * @experimental Video generation is an experimental feature and may change. 
- * - * @example - * ```ts - * const { jobId } = await adapter.createVideoJob({ - * model: 'sora-2', - * prompt: 'A cat chasing a dog in a sunny park', - * size: '1280x720', - * duration: 8 // seconds: 4, 8, or 12 - * }) - * ``` - */ - async createVideoJob( - options: VideoGenerationOptions, - ): Promise { - const { model, size, duration, modelOptions } = options - - // Validate inputs + protected override validateVideoSize(model: string, size?: string): void { validateVideoSize(model, size) - // Duration maps to 'seconds' in the API - const seconds = duration ?? modelOptions?.seconds - validateVideoSeconds(model, seconds) - - // Build request - const request = this.buildRequest(options) - - try { - // POST /v1/videos - // Cast to any because the videos API may not be in SDK types yet - const client = this.client - const response = await client.videos.create(request) - - return { - jobId: response.id, - model, - } - } catch (error: any) { - // Fallback for when the videos API is not available - if (error?.message?.includes('videos') || error?.code === 'invalid_api') { - throw new Error( - `Video generation API is not available. The Sora API may require special access. ` + - `Original error: ${error.message}`, - ) - } - throw error - } } - /** - * Get the current status of a video generation job. - * - * API: GET /v1/videos/{video_id} - * Docs: https://platform.openai.com/docs/api-reference/videos/get - * - * @experimental Video generation is an experimental feature and may change. 
- * - * @example - * ```ts - * const status = await adapter.getVideoStatus(jobId) - * if (status.status === 'completed') { - * console.log('Video is ready!') - * } else if (status.status === 'processing') { - * console.log(`Progress: ${status.progress}%`) - * } - * ``` - */ - async getVideoStatus(jobId: string): Promise { - try { - // GET /v1/videos/{video_id} - const client = this.client - const response = await client.videos.retrieve(jobId) - - return { - jobId, - status: this.mapStatus(response.status), - progress: response.progress, - error: response.error?.message, - } - } catch (error: any) { - if (error.status === 404) { - return { - jobId, - status: 'failed', - error: 'Job not found', - } - } - throw error - } - } - - /** - * Get the URL to download/view the generated video. - * - * API: GET /v1/videos/{video_id}/content - * Docs: https://platform.openai.com/docs/api-reference/videos/content - * - * @experimental Video generation is an experimental feature and may change. - * - * @example - * ```ts - * const { url, expiresAt } = await adapter.getVideoUrl(jobId) - * console.log('Video URL:', url) - * console.log('Expires at:', expiresAt) - * ``` - */ - async getVideoUrl(jobId: string): Promise { - try { - // GET /v1/videos/{video_id}/content - // The SDK may not have a .content() method, so we try multiple approaches - const client = this.client as any - - let response: any - - // Try different possible method names - if (typeof client.videos?.content === 'function') { - response = await client.videos.content(jobId) - } else if (typeof client.videos?.getContent === 'function') { - response = await client.videos.getContent(jobId) - } else if (typeof client.videos?.download === 'function') { - response = await client.videos.download(jobId) - } else { - // Fallback: check if retrieve returns the URL directly - const videoInfo = await client.videos.retrieve(jobId) - if (videoInfo.url) { - return { - jobId, - url: videoInfo.url, - expiresAt: videoInfo.expires_at 
- ? new Date(videoInfo.expires_at) - : undefined, - } - } - - // Last resort: The /content endpoint returns raw binary video data, not JSON. - // We need to construct a URL that the client can use to fetch the video. - // The URL needs to include auth, so we'll create a signed URL or return - // a proxy endpoint. - - // For now, return a URL that goes through our API to proxy the request - // since the raw endpoint requires auth headers that browsers can't send. - // The video element can't add Authorization headers, so we need a workaround. - - // Option 1: Return the direct URL (only works if OpenAI supports query param auth) - // Option 2: Return a blob URL after fetching (memory intensive) - // Option 3: Return a proxy URL through our server - - // Let's try fetching and returning a data URL for now - const baseUrl = this.config.baseUrl || 'https://api.openai.com/v1' - const apiKey = this.config.apiKey - - const contentResponse = await fetch( - `${baseUrl}/videos/${jobId}/content`, - { - method: 'GET', - headers: { - Authorization: `Bearer ${apiKey}`, - }, - }, - ) - - if (!contentResponse.ok) { - // Try to parse error as JSON, but it might be binary - const contentType = contentResponse.headers.get('content-type') - if (contentType?.includes('application/json')) { - const errorData = await contentResponse.json().catch(() => ({})) - throw new Error( - errorData.error?.message || - `Failed to get video content: ${contentResponse.status}`, - ) - } - throw new Error( - `Failed to get video content: ${contentResponse.status}`, - ) - } - - // The response is the raw video file - convert to base64 data URL - const videoBlob = await contentResponse.blob() - const buffer = await videoBlob.arrayBuffer() - const base64 = Buffer.from(buffer).toString('base64') - const mimeType = - contentResponse.headers.get('content-type') || 'video/mp4' - - return { - jobId, - url: `data:${mimeType};base64,${base64}`, - expiresAt: undefined, // Data URLs don't expire - } - } - - return 
{ - jobId, - url: response.url, - expiresAt: response.expires_at - ? new Date(response.expires_at) - : undefined, - } - } catch (error: any) { - if (error.status === 404) { - throw new Error(`Video job not found: ${jobId}`) - } - if (error.status === 400) { - throw new Error( - `Video is not ready for download. Check status first. Job ID: ${jobId}`, - ) - } - throw error - } + protected override validateVideoSeconds( + model: string, + seconds?: number | string, + ): void { + validateVideoSeconds(model, seconds) } - private buildRequest( + protected override buildRequest( options: VideoGenerationOptions, ): OpenAI_SDK.Videos.VideoCreateParams { const { model, prompt, size, duration, modelOptions } = options @@ -301,28 +89,6 @@ export class OpenAIVideoAdapter< return request } - - private mapStatus( - apiStatus: string, - ): 'pending' | 'processing' | 'completed' | 'failed' { - switch (apiStatus) { - case 'queued': - case 'pending': - return 'pending' - case 'processing': - case 'in_progress': - return 'processing' - case 'completed': - case 'succeeded': - return 'completed' - case 'failed': - case 'error': - case 'cancelled': - return 'failed' - default: - return 'processing' - } - } } /** diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts index 8e73cc898..591111516 100644 --- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts +++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts @@ -1,26 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type ApplyPatchTool = OpenAI.Responses.ApplyPatchTool - -/** - * Converts a standard Tool to OpenAI ApplyPatchTool format - */ -export function convertApplyPatchToolToAdapterFormat( - _tool: Tool, -): ApplyPatchTool { - return { - type: 'apply_patch', - } -} - -/** - * Creates a standard Tool from ApplyPatchTool parameters - */ -export function applyPatchTool(): Tool { - return { - 
name: 'apply_patch', - description: 'Apply a patch to modify files', - metadata: {}, - } -} +export { + type ApplyPatchTool, + convertApplyPatchToolToAdapterFormat, + applyPatchTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts index 15bd8e429..032452209 100644 --- a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts +++ b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts @@ -1,31 +1,5 @@ -import type { Tool } from '@tanstack/ai' -import type OpenAI from 'openai' - -export type CodeInterpreterTool = OpenAI.Responses.Tool.CodeInterpreter - -/** - * Converts a standard Tool to OpenAI CodeInterpreterTool format - */ -export function convertCodeInterpreterToolToAdapterFormat( - tool: Tool, -): CodeInterpreterTool { - const metadata = tool.metadata as CodeInterpreterTool - return { - type: 'code_interpreter', - container: metadata.container, - } -} - -/** - * Creates a standard Tool from CodeInterpreterTool parameters - */ -export function codeInterpreterTool(container: CodeInterpreterTool): Tool { - return { - name: 'code_interpreter', - description: 'Execute code in a sandboxed environment', - metadata: { - type: 'code_interpreter', - container, - }, - } -} +export { + type CodeInterpreterTool, + convertCodeInterpreterToolToAdapterFormat, + codeInterpreterTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts index 1a19b573b..31ea46807 100644 --- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts @@ -1,31 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type ComputerUseTool = OpenAI.Responses.ComputerTool -/** - * Converts a standard Tool to OpenAI ComputerUseTool format 
- */ -export function convertComputerUseToolToAdapterFormat( - tool: Tool, -): ComputerUseTool { - const metadata = tool.metadata as ComputerUseTool - return { - type: 'computer_use_preview', - display_height: metadata.display_height, - display_width: metadata.display_width, - environment: metadata.environment, - } -} - -/** - * Creates a standard Tool from ComputerUseTool parameters - */ -export function computerUseTool(toolData: ComputerUseTool): Tool { - return { - name: 'computer_use_preview', - description: 'Control a virtual computer', - metadata: { - ...toolData, - }, - } -} +export { + type ComputerUseTool, + convertComputerUseToolToAdapterFormat, + computerUseTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts index ad7de4d25..b4b8baa14 100644 --- a/packages/typescript/ai-openai/src/tools/custom-tool.ts +++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts @@ -1,30 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type CustomTool = OpenAI.Responses.CustomTool - -/** - * Converts a standard Tool to OpenAI CustomTool format - */ -export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { - const metadata = tool.metadata as CustomTool - return { - type: 'custom', - name: metadata.name, - description: metadata.description, - format: metadata.format, - } -} - -/** - * Creates a standard Tool from CustomTool parameters - */ -export function customTool(toolData: CustomTool): Tool { - return { - name: 'custom', - description: toolData.description || 'A custom tool', - metadata: { - ...toolData, - }, - } -} +export { + type CustomTool, + convertCustomToolToAdapterFormat, + customTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts index 0fc85f06e..b30b8d94c 100644 --- 
a/packages/typescript/ai-openai/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts @@ -1,42 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -const validateMaxNumResults = (maxNumResults: number | undefined) => { - if (maxNumResults && (maxNumResults < 1 || maxNumResults > 50)) { - throw new Error('max_num_results must be between 1 and 50.') - } -} - -export type FileSearchTool = OpenAI.Responses.FileSearchTool - -/** - * Converts a standard Tool to OpenAI FileSearchTool format - */ -export function convertFileSearchToolToAdapterFormat( - tool: Tool, -): OpenAI.Responses.FileSearchTool { - const metadata = tool.metadata as OpenAI.Responses.FileSearchTool - return { - type: 'file_search', - vector_store_ids: metadata.vector_store_ids, - max_num_results: metadata.max_num_results, - ranking_options: metadata.ranking_options, - filters: metadata.filters, - } -} - -/** - * Creates a standard Tool from FileSearchTool parameters - */ -export function fileSearchTool( - toolData: OpenAI.Responses.FileSearchTool, -): Tool { - validateMaxNumResults(toolData.max_num_results) - return { - name: 'file_search', - description: 'Search files in vector stores', - metadata: { - ...toolData, - }, - } -} +export { + type FileSearchTool, + convertFileSearchToolToAdapterFormat, + fileSearchTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts index 6bcce9cde..0d48a9c5b 100644 --- a/packages/typescript/ai-openai/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai/src/tools/function-tool.ts @@ -1,42 +1,4 @@ -import { makeOpenAIStructuredOutputCompatible } from '../utils/schema-converter' -import type { JSONSchema, Tool } from '@tanstack/ai' -import type OpenAI from 'openai' - -export type FunctionTool = OpenAI.Responses.FunctionTool - -/** - * Converts a standard Tool to OpenAI 
FunctionTool format. - * - * Tool schemas are already converted to JSON Schema in the ai layer. - * We apply OpenAI-specific transformations for strict mode: - * - All properties in required array - * - Optional fields made nullable - * - additionalProperties: false - * - * This enables strict mode for all tools automatically. - */ -export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { - // Tool schemas are already converted to JSON Schema in the ai layer - // Apply OpenAI-specific transformations for strict mode - const inputSchema = (tool.inputSchema ?? { - type: 'object', - properties: {}, - required: [], - }) as JSONSchema - - const jsonSchema = makeOpenAIStructuredOutputCompatible( - inputSchema, - inputSchema.required || [], - ) - - // Ensure additionalProperties is false for strict mode - jsonSchema.additionalProperties = false - - return { - type: 'function', - name: tool.name, - description: tool.description, - parameters: jsonSchema, - strict: true, // Always use strict mode since our schema converter handles the requirements - } satisfies FunctionTool -} +export { + type FunctionTool, + convertFunctionToolToAdapterFormat, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts index c48ff1e0e..a776412c1 100644 --- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts +++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts @@ -1,39 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type ImageGenerationTool = OpenAI.Responses.Tool.ImageGeneration - -const validatePartialImages = (value: number | undefined) => { - if (value !== undefined && (value < 0 || value > 3)) { - throw new Error('partial_images must be between 0 and 3') - } -} - -/** - * Converts a standard Tool to OpenAI ImageGenerationTool format - */ -export function 
convertImageGenerationToolToAdapterFormat( - tool: Tool, -): ImageGenerationTool { - const metadata = tool.metadata as Omit - return { - type: 'image_generation', - ...metadata, - } -} - -/** - * Creates a standard Tool from ImageGenerationTool parameters - */ -export function imageGenerationTool( - toolData: Omit, -): Tool { - validatePartialImages(toolData.partial_images) - return { - name: 'image_generation', - description: 'Generate images based on text descriptions', - metadata: { - ...toolData, - }, - } -} +export { + type ImageGenerationTool, + convertImageGenerationToolToAdapterFormat, + imageGenerationTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/index.ts b/packages/typescript/ai-openai/src/tools/index.ts index 1795d7fce..19a6f4c64 100644 --- a/packages/typescript/ai-openai/src/tools/index.ts +++ b/packages/typescript/ai-openai/src/tools/index.ts @@ -1,29 +1,4 @@ -import type { ApplyPatchTool } from './apply-patch-tool' -import type { CodeInterpreterTool } from './code-interpreter-tool' -import type { ComputerUseTool } from './computer-use-tool' -import type { CustomTool } from './custom-tool' -import type { FileSearchTool } from './file-search-tool' -import type { FunctionTool } from './function-tool' -import type { ImageGenerationTool } from './image-generation-tool' -import type { LocalShellTool } from './local-shell-tool' -import type { MCPTool } from './mcp-tool' -import type { ShellTool } from './shell-tool' -import type { WebSearchPreviewTool } from './web-search-preview-tool' -import type { WebSearchTool } from './web-search-tool' - -export type OpenAITool = - | ApplyPatchTool - | CodeInterpreterTool - | ComputerUseTool - | CustomTool - | FileSearchTool - | FunctionTool - | ImageGenerationTool - | LocalShellTool - | MCPTool - | ShellTool - | WebSearchPreviewTool - | WebSearchTool +export { type OpenAITool } from '@tanstack/openai-base' export * from './apply-patch-tool' export * from 
'./code-interpreter-tool' diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts index ed829cb28..90e39576d 100644 --- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts @@ -1,26 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type LocalShellTool = OpenAI.Responses.Tool.LocalShell - -/** - * Converts a standard Tool to OpenAI LocalShellTool format - */ -export function convertLocalShellToolToAdapterFormat( - _tool: Tool, -): LocalShellTool { - return { - type: 'local_shell', - } -} - -/** - * Creates a standard Tool from LocalShellTool parameters - */ -export function localShellTool(): Tool { - return { - name: 'local_shell', - description: 'Execute local shell commands', - metadata: {}, - } -} +export { + type LocalShellTool, + convertLocalShellToolToAdapterFormat, + localShellTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts index 64b94357f..0af9c08f5 100644 --- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts +++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts @@ -1,41 +1,6 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type MCPTool = OpenAI.Responses.Tool.Mcp - -export function validateMCPtool(tool: MCPTool) { - if (!tool.server_url && !tool.connector_id) { - throw new Error('Either server_url or connector_id must be provided.') - } - if (tool.connector_id && tool.server_url) { - throw new Error('Only one of server_url or connector_id can be provided.') - } -} - -/** - * Converts a standard Tool to OpenAI MCPTool format - */ -export function convertMCPToolToAdapterFormat(tool: Tool): MCPTool { - const metadata = tool.metadata as Omit - - const mcpTool: MCPTool = { - type: 'mcp', - ...metadata, - } - - 
validateMCPtool(mcpTool) - return mcpTool -} - -/** - * Creates a standard Tool from MCPTool parameters - */ -export function mcpTool(toolData: Omit): Tool { - validateMCPtool({ ...toolData, type: 'mcp' }) - - return { - name: 'mcp', - description: toolData.server_description || '', - metadata: toolData, - } -} +export { + type MCPTool, + validateMCPtool, + convertMCPToolToAdapterFormat, + mcpTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts index 83b301a23..b0b915c35 100644 --- a/packages/typescript/ai-openai/src/tools/shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts @@ -1,24 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type ShellTool = OpenAI.Responses.FunctionShellTool - -/** - * Converts a standard Tool to OpenAI ShellTool format - */ -export function convertShellToolToAdapterFormat(_tool: Tool): ShellTool { - return { - type: 'shell', - } -} - -/** - * Creates a standard Tool from ShellTool parameters - */ -export function shellTool(): Tool { - return { - name: 'shell', - description: 'Execute shell commands', - metadata: {}, - } -} +export { + type ShellTool, + convertShellToolToAdapterFormat, + shellTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/tool-choice.ts b/packages/typescript/ai-openai/src/tools/tool-choice.ts index db6e0b148..99df1824f 100644 --- a/packages/typescript/ai-openai/src/tools/tool-choice.ts +++ b/packages/typescript/ai-openai/src/tools/tool-choice.ts @@ -1,31 +1 @@ -interface MCPToolChoice { - type: 'mcp' - server_label: 'deepwiki' -} - -interface FunctionToolChoice { - type: 'function' - name: string -} - -interface CustomToolChoice { - type: 'custom' - name: string -} - -interface HostedToolChoice { - type: - | 'file_search' - | 'web_search_preview' - | 'computer_use_preview' - | 'code_interpreter' - | 
'image_generation' - | 'shell' - | 'apply_patch' -} - -export type ToolChoice = - | MCPToolChoice - | FunctionToolChoice - | CustomToolChoice - | HostedToolChoice +export { type ToolChoice } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/tool-converter.ts b/packages/typescript/ai-openai/src/tools/tool-converter.ts index c4ac5909a..3d78a1b18 100644 --- a/packages/typescript/ai-openai/src/tools/tool-converter.ts +++ b/packages/typescript/ai-openai/src/tools/tool-converter.ts @@ -1,72 +1 @@ -import { convertApplyPatchToolToAdapterFormat } from './apply-patch-tool' -import { convertCodeInterpreterToolToAdapterFormat } from './code-interpreter-tool' -import { convertComputerUseToolToAdapterFormat } from './computer-use-tool' -import { convertCustomToolToAdapterFormat } from './custom-tool' -import { convertFileSearchToolToAdapterFormat } from './file-search-tool' -import { convertFunctionToolToAdapterFormat } from './function-tool' -import { convertImageGenerationToolToAdapterFormat } from './image-generation-tool' -import { convertLocalShellToolToAdapterFormat } from './local-shell-tool' -import { convertMCPToolToAdapterFormat } from './mcp-tool' -import { convertShellToolToAdapterFormat } from './shell-tool' -import { convertWebSearchPreviewToolToAdapterFormat } from './web-search-preview-tool' -import { convertWebSearchToolToAdapterFormat } from './web-search-tool' -import type { OpenAITool } from './index' -import type { Tool } from '@tanstack/ai' - -/** - * Converts an array of standard Tools to OpenAI-specific format - */ -export function convertToolsToProviderFormat( - tools: Array, -): Array { - return tools.map((tool) => { - // Special tool names that map to specific OpenAI tool types - const specialToolNames = new Set([ - 'apply_patch', - 'code_interpreter', - 'computer_use_preview', - 'file_search', - 'image_generation', - 'local_shell', - 'mcp', - 'shell', - 'web_search_preview', - 'web_search', - 'custom', - ]) - - const 
toolName = tool.name - - // If it's a special tool name, route to the appropriate converter - if (specialToolNames.has(toolName)) { - switch (toolName) { - case 'apply_patch': - return convertApplyPatchToolToAdapterFormat(tool) - case 'code_interpreter': - return convertCodeInterpreterToolToAdapterFormat(tool) - case 'computer_use_preview': - return convertComputerUseToolToAdapterFormat(tool) - case 'file_search': - return convertFileSearchToolToAdapterFormat(tool) - case 'image_generation': - return convertImageGenerationToolToAdapterFormat(tool) - case 'local_shell': - return convertLocalShellToolToAdapterFormat(tool) - case 'mcp': - return convertMCPToolToAdapterFormat(tool) - case 'shell': - return convertShellToolToAdapterFormat(tool) - case 'web_search_preview': - return convertWebSearchPreviewToolToAdapterFormat(tool) - case 'web_search': - return convertWebSearchToolToAdapterFormat(tool) - case 'custom': - return convertCustomToolToAdapterFormat(tool) - } - } - - // For regular function tools (not special names), convert as function tool - // This handles tools like "getGuitars", "recommendGuitar", etc. 
- return convertFunctionToolToAdapterFormat(tool) - }) -} +export { convertToolsToProviderFormat } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts index 48942d436..67b88dc76 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts @@ -1,29 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type WebSearchPreviewTool = OpenAI.Responses.WebSearchPreviewTool - -/** - * Converts a standard Tool to OpenAI WebSearchPreviewTool format - */ -export function convertWebSearchPreviewToolToAdapterFormat( - tool: Tool, -): WebSearchPreviewTool { - const metadata = tool.metadata as WebSearchPreviewTool - return { - type: metadata.type, - search_context_size: metadata.search_context_size, - user_location: metadata.user_location, - } -} - -/** - * Creates a standard Tool from WebSearchPreviewTool parameters - */ -export function webSearchPreviewTool(toolData: WebSearchPreviewTool): Tool { - return { - name: 'web_search_preview', - description: 'Search the web (preview version)', - metadata: toolData, - } -} +export { + type WebSearchPreviewTool, + convertWebSearchPreviewToolToAdapterFormat, + webSearchPreviewTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts index c7d5aef68..0cacf1311 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts @@ -1,23 +1,5 @@ -import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' - -export type WebSearchTool = OpenAI.Responses.WebSearchTool - -/** - * Converts a standard Tool to OpenAI WebSearchTool format - */ -export function convertWebSearchToolToAdapterFormat(tool: 
Tool): WebSearchTool { - const metadata = tool.metadata as WebSearchTool - return metadata -} - -/** - * Creates a standard Tool from WebSearchTool parameters - */ -export function webSearchTool(toolData: WebSearchTool): Tool { - return { - name: 'web_search', - description: 'Search the web', - metadata: toolData, - } -} +export { + type WebSearchTool, + convertWebSearchToolToAdapterFormat, + webSearchTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts index 3915e2ea1..c3d34cf29 100644 --- a/packages/typescript/ai-openai/src/utils/client.ts +++ b/packages/typescript/ai-openai/src/utils/client.ts @@ -1,4 +1,6 @@ import OpenAI_SDK from 'openai' +import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' +import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base' import type { ClientOptions } from 'openai' export interface OpenAIClientConfig extends ClientOptions { @@ -17,26 +19,24 @@ export function createOpenAIClient(config: OpenAIClientConfig): OpenAI_SDK { * @throws Error if OPENAI_API_KEY is not found */ export function getOpenAIApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? process.env - : undefined - const key = env?.OPENAI_API_KEY - - if (!key) { - throw new Error( - 'OPENAI_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', - ) - } - - return key + return getApiKeyFromEnv('OPENAI_API_KEY') } /** * Generates a unique ID with a prefix */ export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) +} + +/** + * Converts an OpenAIClientConfig to OpenAICompatibleClientConfig. 
+ * This bridges the type gap between the local config type (which extends + * the local copy of ClientOptions) and the base package's config type + * (which extends its own copy of ClientOptions). + */ +export function toCompatibleConfig( + config: OpenAIClientConfig, +): OpenAICompatibleClientConfig { + return config as unknown as OpenAICompatibleClientConfig } diff --git a/packages/typescript/ai-openai/src/utils/schema-converter.ts b/packages/typescript/ai-openai/src/utils/schema-converter.ts index d431bfe77..fb9ee165e 100644 --- a/packages/typescript/ai-openai/src/utils/schema-converter.ts +++ b/packages/typescript/ai-openai/src/utils/schema-converter.ts @@ -1,38 +1,7 @@ -/** - * Recursively transform null values to undefined in an object. - * - * This is needed because OpenAI's structured output requires all fields to be - * in the `required` array, with optional fields made nullable (type: ["string", "null"]). - * When OpenAI returns null for optional fields, we need to convert them back to - * undefined to match the original Zod schema expectations. 
- * - * @param obj - Object to transform - * @returns Object with nulls converted to undefined - */ -export function transformNullsToUndefined(obj: T): T { - if (obj === null) { - return undefined as unknown as T - } +import { transformNullsToUndefined } from '@tanstack/ai-utils' +import { makeStructuredOutputCompatible } from '@tanstack/openai-base' - if (Array.isArray(obj)) { - return obj.map((item) => transformNullsToUndefined(item)) as unknown as T - } - - if (typeof obj === 'object') { - const result: Record = {} - for (const [key, value] of Object.entries(obj as Record)) { - const transformed = transformNullsToUndefined(value) - // Only include the key if the value is not undefined - // This makes { notes: null } become {} (field absent) instead of { notes: undefined } - if (transformed !== undefined) { - result[key] = transformed - } - } - return result as T - } - - return obj -} +export { transformNullsToUndefined } /** * Transform a JSON schema to be compatible with OpenAI's structured output requirements. 
@@ -49,86 +18,5 @@ export function makeOpenAIStructuredOutputCompatible( schema: Record, originalRequired: Array = [], ): Record { - const result = { ...schema } - - // Handle object types - if (result.type === 'object' && result.properties) { - const properties = { ...result.properties } - const allPropertyNames = Object.keys(properties) - - // Transform each property - for (const propName of allPropertyNames) { - const prop = properties[propName] - const wasOptional = !originalRequired.includes(propName) - - // Recursively transform nested objects/arrays/unions - if (prop.type === 'object' && prop.properties) { - properties[propName] = makeOpenAIStructuredOutputCompatible( - prop, - prop.required || [], - ) - } else if (prop.type === 'array' && prop.items) { - properties[propName] = { - ...prop, - items: makeOpenAIStructuredOutputCompatible( - prop.items, - prop.items.required || [], - ), - } - } else if (prop.anyOf) { - // Handle anyOf at property level (union types) - properties[propName] = makeOpenAIStructuredOutputCompatible( - prop, - prop.required || [], - ) - } else if (prop.oneOf) { - // oneOf is not supported by OpenAI - throw early - throw new Error( - 'oneOf is not supported in OpenAI structured output schemas. 
Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', - ) - } else if (wasOptional) { - // Make optional fields nullable by adding null to the type - if (prop.type && !Array.isArray(prop.type)) { - properties[propName] = { - ...prop, - type: [prop.type, 'null'], - } - } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { - properties[propName] = { - ...prop, - type: [...prop.type, 'null'], - } - } - } - } - - result.properties = properties - // ALL properties must be required for OpenAI structured output - result.required = allPropertyNames - // additionalProperties must be false - result.additionalProperties = false - } - - // Handle array types with object items - if (result.type === 'array' && result.items) { - result.items = makeOpenAIStructuredOutputCompatible( - result.items, - result.items.required || [], - ) - } - - // Handle anyOf (union types) - each variant needs to be transformed - if (result.anyOf && Array.isArray(result.anyOf)) { - result.anyOf = result.anyOf.map((variant) => - makeOpenAIStructuredOutputCompatible(variant, variant.required || []), - ) - } - - if (result.oneOf) { - throw new Error( - 'oneOf is not supported in OpenAI structured output schemas. 
Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', - ) - } - - return result + return makeStructuredOutputCompatible(schema, originalRequired) } diff --git a/packages/typescript/openai-base/src/types/config.ts b/packages/typescript/openai-base/src/types/config.ts index e925143a6..976336b42 100644 --- a/packages/typescript/openai-base/src/types/config.ts +++ b/packages/typescript/openai-base/src/types/config.ts @@ -2,5 +2,4 @@ import type { ClientOptions } from 'openai' export interface OpenAICompatibleClientConfig extends ClientOptions { apiKey: string - baseURL?: string } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 518ae9e84..c64feb58e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -886,6 +886,12 @@ importers: packages/typescript/ai-openai: dependencies: + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base openai: specifier: ^6.9.1 version: 6.10.0(ws@8.18.3)(zod@4.2.1) From d4b74b393d3be326b3e7e996773a7f69cad2c688 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 14:25:56 +0200 Subject: [PATCH 08/15] refactor(ai-grok): delegate to @tanstack/openai-base and @tanstack/ai-utils --- packages/typescript/ai-grok/package.json | 4 + .../typescript/ai-grok/src/adapters/image.ts | 83 +-- .../ai-grok/src/adapters/summarize.ts | 103 +--- .../typescript/ai-grok/src/adapters/text.ts | 526 +----------------- .../ai-grok/src/tools/function-tool.ts | 49 +- .../ai-grok/src/tools/tool-converter.ts | 20 +- .../typescript/ai-grok/src/utils/client.ts | 40 +- .../typescript/ai-grok/src/utils/index.ts | 3 +- .../ai-grok/src/utils/schema-converter.ts | 112 +--- .../ai-grok/tests/grok-adapter.test.ts | 56 +- 10 files changed, 91 insertions(+), 905 deletions(-) diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index ba76c8d99..bd0cd1418 100644 --- 
a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -40,9 +40,13 @@ "adapter" ], "dependencies": { + "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "openai": "^6.9.1" }, "devDependencies": { + "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7" }, diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index 4bdabd355..0a2150fbb 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -1,5 +1,5 @@ -import { BaseImageAdapter } from '@tanstack/ai/adapters' -import { createGrokClient, generateId, getGrokApiKeyFromEnv } from '../utils' +import { OpenAICompatibleImageAdapter } from '@tanstack/openai-base' +import { getGrokApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import { validateImageSize, validateNumberOfImages, @@ -11,12 +11,6 @@ import type { GrokImageModelSizeByName, GrokImageProviderOptions, } from '../image/image-provider-options' -import type { - GeneratedImage, - ImageGenerationOptions, - ImageGenerationResult, -} from '@tanstack/ai' -import type OpenAI_SDK from 'openai' import type { GrokClientConfig } from '../utils' /** @@ -37,7 +31,7 @@ export interface GrokImageConfig extends GrokClientConfig {} */ export class GrokImageAdapter< TModel extends GrokImageModel, -> extends BaseImageAdapter< +> extends OpenAICompatibleImageAdapter< TModel, GrokImageProviderOptions, GrokImageModelProviderOptionsByName, @@ -46,70 +40,29 @@ export class GrokImageAdapter< readonly kind = 'image' as const readonly name = 'grok' as const - private client: OpenAI_SDK - constructor(config: GrokImageConfig, model: TModel) { - super({}, model) - this.client = createGrokClient(config) + super(toCompatibleConfig(config), model, 'grok') } - async generateImages( - options: ImageGenerationOptions, - ): 
Promise { - const { model, prompt, numberOfImages, size } = options - - // Validate inputs - validatePrompt({ prompt, model }) - validateImageSize(model, size) - validateNumberOfImages(model, numberOfImages) - - // Build request based on model type - const request = this.buildRequest(options) - - const response = await this.client.images.generate({ - ...request, - stream: false, - }) - - return this.transformResponse(model, response) + protected override validatePrompt(options: { + prompt: string + model: string + }): void { + validatePrompt(options) } - private buildRequest( - options: ImageGenerationOptions, - ): OpenAI_SDK.Images.ImageGenerateParams { - const { model, prompt, numberOfImages, size, modelOptions } = options - - return { - model, - prompt, - n: numberOfImages ?? 1, - size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], - ...modelOptions, - } + protected override validateImageSize( + model: string, + size: string | undefined, + ): void { + validateImageSize(model, size) } - private transformResponse( + protected override validateNumberOfImages( model: string, - response: OpenAI_SDK.Images.ImagesResponse, - ): ImageGenerationResult { - const images: Array = (response.data ?? []).map((item) => ({ - b64Json: item.b64_json, - url: item.url, - revisedPrompt: item.revised_prompt, - })) - - return { - id: generateId(this.name), - model, - images, - usage: response.usage - ? 
{ - inputTokens: response.usage.input_tokens, - outputTokens: response.usage.output_tokens, - totalTokens: response.usage.total_tokens, - } - : undefined, - } + numberOfImages: number | undefined, + ): void { + validateNumberOfImages(model, numberOfImages) } } diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index e9de0b663..d84796ca5 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -1,12 +1,8 @@ -import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-base' import { getGrokApiKeyFromEnv } from '../utils' import { GrokTextAdapter } from './text' +import type { ChatStreamCapable } from '@tanstack/openai-base' import type { GROK_CHAT_MODELS } from '../model-meta' -import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' import type { GrokClientConfig } from '../utils' /** @@ -35,94 +31,21 @@ export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number] */ export class GrokSummarizeAdapter< TModel extends GrokSummarizeModel, -> extends BaseSummarizeAdapter { +> extends OpenAICompatibleSummarizeAdapter { readonly kind = 'summarize' as const readonly name = 'grok' as const - private textAdapter: GrokTextAdapter - constructor(config: GrokSummarizeConfig, model: TModel) { - super({}, model) - this.textAdapter = new GrokTextAdapter(config, model) - } - - async summarize(options: SummarizationOptions): Promise { - const systemPrompt = this.buildSummarizationPrompt(options) - - // Use the text adapter's streaming and collect the result - let summary = '' - const id = '' - let model = options.model - let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - 
systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - })) { - // AG-UI TEXT_MESSAGE_CONTENT event - if (chunk.type === 'TEXT_MESSAGE_CONTENT') { - if (chunk.content) { - summary = chunk.content - } else { - summary += chunk.delta - } - model = chunk.model || model - } - // AG-UI RUN_FINISHED event - if (chunk.type === 'RUN_FINISHED') { - if (chunk.usage) { - usage = chunk.usage - } - } - } - - return { id, model, summary, usage } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const systemPrompt = this.buildSummarizationPrompt(options) - - // Delegate directly to the text adapter's streaming - yield* this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - }) - } - - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. ' - break - default: - prompt += 'Provide a clear and concise summary. ' - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` - } - - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} tokens. ` - } - - return prompt + // The text adapter accepts richer provider options than the summarize adapter needs, + // but we only pass basic options (model, messages, systemPrompts, etc.) at call time. 
+ super( + new GrokTextAdapter( + config, + model, + ) as unknown as ChatStreamCapable, + model, + 'grok', + ) } } diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index c0204ab53..59e3f933d 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -1,32 +1,11 @@ -import { BaseTextAdapter } from '@tanstack/ai/adapters' -import { validateTextProviderOptions } from '../text/text-provider-options' -import { convertToolsToProviderFormat } from '../tools' -import { - createGrokClient, - generateId, - getGrokApiKeyFromEnv, - makeGrokStructuredOutputCompatible, - transformNullsToUndefined, -} from '../utils' +import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base' +import { getGrokApiKeyFromEnv, toCompatibleConfig } from '../utils/client' import type { GROK_CHAT_MODELS, ResolveInputModalities, ResolveProviderOptions, } from '../model-meta' import type { - StructuredOutputOptions, - StructuredOutputResult, -} from '@tanstack/ai/adapters' -import type OpenAI_SDK from 'openai' -import type { - ContentPart, - ModelMessage, - StreamChunk, - TextOptions, -} from '@tanstack/ai' -import type { InternalTextProviderOptions } from '../text/text-provider-options' -import type { - GrokImageMetadata, GrokMessageMetadataByModality, } from '../message-types' import type { GrokClientConfig } from '../utils' @@ -49,7 +28,7 @@ export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../ */ export class GrokTextAdapter< TModel extends (typeof GROK_CHAT_MODELS)[number], -> extends BaseTextAdapter< +> extends OpenAICompatibleChatCompletionsTextAdapter< TModel, ResolveProviderOptions, ResolveInputModalities, @@ -58,505 +37,8 @@ export class GrokTextAdapter< readonly kind = 'text' as const readonly name = 'grok' as const - private client: OpenAI_SDK - constructor(config: GrokTextConfig, model: TModel) { - super({}, model) - 
this.client = createGrokClient(config) - } - - async *chatStream( - options: TextOptions>, - ): AsyncIterable { - const requestParams = this.mapTextOptionsToGrok(options) - const timestamp = Date.now() - - // AG-UI lifecycle tracking (mutable state object for ESLint compatibility) - const aguiState = { - runId: generateId(this.name), - messageId: generateId(this.name), - timestamp, - hasEmittedRunStarted: false, - } - - try { - const stream = await this.client.chat.completions.create({ - ...requestParams, - stream: true, - }) - - yield* this.processGrokStreamChunks(stream, options, aguiState) - } catch (error: unknown) { - const err = error as Error & { code?: string } - - // Emit RUN_STARTED if not yet emitted - if (!aguiState.hasEmittedRunStarted) { - aguiState.hasEmittedRunStarted = true - yield { - type: 'RUN_STARTED', - runId: aguiState.runId, - model: options.model, - timestamp, - } - } - - // Emit AG-UI RUN_ERROR - yield { - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - error: { - message: err.message || 'Unknown error', - code: err.code, - }, - } - - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) - } - } - - /** - * Generate structured output using Grok's JSON Schema response format. - * Uses stream: false to get the complete response in one call. - * - * Grok has strict requirements for structured output (via OpenAI-compatible API): - * - All properties must be in the `required` array - * - Optional fields should have null added to their type union - * - additionalProperties must be false for all objects - * - * The outputSchema is already JSON Schema (converted in the ai layer). - * We apply Grok-specific transformations for structured output compatibility. 
- */ - async structuredOutput( - options: StructuredOutputOptions>, - ): Promise> { - const { chatOptions, outputSchema } = options - const requestParams = this.mapTextOptionsToGrok(chatOptions) - - // Apply Grok-specific transformations for structured output compatibility - const jsonSchema = makeGrokStructuredOutputCompatible( - outputSchema, - outputSchema.required || [], - ) - - try { - const response = await this.client.chat.completions.create({ - ...requestParams, - stream: false, - response_format: { - type: 'json_schema', - json_schema: { - name: 'structured_output', - schema: jsonSchema, - strict: true, - }, - }, - }) - - // Extract text content from the response - const rawText = response.choices[0]?.message.content || '' - - // Parse the JSON response - let parsed: unknown - try { - parsed = JSON.parse(rawText) - } catch { - throw new Error( - `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, - ) - } - - // Transform null values to undefined to match original Zod schema expectations - // Grok returns null for optional fields we made nullable in the schema - const transformed = transformNullsToUndefined(parsed) - - return { - data: transformed, - rawText, - } - } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) - throw error - } - } - - private async *processGrokStreamChunks( - stream: AsyncIterable, - options: TextOptions, - aguiState: { - runId: string - messageId: string - timestamp: number - hasEmittedRunStarted: boolean - }, - ): AsyncIterable { - let accumulatedContent = '' - const timestamp = aguiState.timestamp - let hasEmittedTextMessageStart = false - - // Track tool calls being streamed (arguments come in chunks) - const toolCallsInProgress = new Map< - number, - { - id: string - name: string - arguments: string - started: boolean // Track if 
TOOL_CALL_START has been emitted - } - >() - - try { - for await (const chunk of stream) { - const choice = chunk.choices[0] - - if (!choice) continue - - // Emit RUN_STARTED on first chunk - if (!aguiState.hasEmittedRunStarted) { - aguiState.hasEmittedRunStarted = true - yield { - type: 'RUN_STARTED', - runId: aguiState.runId, - model: chunk.model || options.model, - timestamp, - } - } - - const delta = choice.delta - const deltaContent = delta.content - const deltaToolCalls = delta.tool_calls - - // Handle content delta - if (deltaContent) { - // Emit TEXT_MESSAGE_START on first text content - if (!hasEmittedTextMessageStart) { - hasEmittedTextMessageStart = true - yield { - type: 'TEXT_MESSAGE_START', - messageId: aguiState.messageId, - model: chunk.model || options.model, - timestamp, - role: 'assistant', - } - } - - accumulatedContent += deltaContent - - // Emit AG-UI TEXT_MESSAGE_CONTENT - yield { - type: 'TEXT_MESSAGE_CONTENT', - messageId: aguiState.messageId, - model: chunk.model || options.model, - timestamp, - delta: deltaContent, - content: accumulatedContent, - } - } - - // Handle tool calls - they come in as deltas - if (deltaToolCalls) { - for (const toolCallDelta of deltaToolCalls) { - const index = toolCallDelta.index - - // Initialize or update the tool call in progress - if (!toolCallsInProgress.has(index)) { - toolCallsInProgress.set(index, { - id: toolCallDelta.id || '', - name: toolCallDelta.function?.name || '', - arguments: '', - started: false, - }) - } - - const toolCall = toolCallsInProgress.get(index)! 
- - // Update with any new data from the delta - if (toolCallDelta.id) { - toolCall.id = toolCallDelta.id - } - if (toolCallDelta.function?.name) { - toolCall.name = toolCallDelta.function.name - } - if (toolCallDelta.function?.arguments) { - toolCall.arguments += toolCallDelta.function.arguments - } - - // Emit TOOL_CALL_START when we have id and name - if (toolCall.id && toolCall.name && !toolCall.started) { - toolCall.started = true - yield { - type: 'TOOL_CALL_START', - toolCallId: toolCall.id, - toolName: toolCall.name, - model: chunk.model || options.model, - timestamp, - index, - } - } - - // Emit TOOL_CALL_ARGS for argument deltas - if (toolCallDelta.function?.arguments && toolCall.started) { - yield { - type: 'TOOL_CALL_ARGS', - toolCallId: toolCall.id, - model: chunk.model || options.model, - timestamp, - delta: toolCallDelta.function.arguments, - } - } - } - } - - // Handle finish reason - if (choice.finish_reason) { - // Emit all completed tool calls - if ( - choice.finish_reason === 'tool_calls' || - toolCallsInProgress.size > 0 - ) { - for (const [, toolCall] of toolCallsInProgress) { - // Parse arguments for TOOL_CALL_END - let parsedInput: unknown = {} - try { - parsedInput = toolCall.arguments - ? JSON.parse(toolCall.arguments) - : {} - } catch { - parsedInput = {} - } - - // Emit AG-UI TOOL_CALL_END - yield { - type: 'TOOL_CALL_END', - toolCallId: toolCall.id, - toolName: toolCall.name, - model: chunk.model || options.model, - timestamp, - input: parsedInput, - } - } - } - - const computedFinishReason = - choice.finish_reason === 'tool_calls' || - toolCallsInProgress.size > 0 - ? 
'tool_calls' - : 'stop' - - // Emit TEXT_MESSAGE_END if we had text content - if (hasEmittedTextMessageStart) { - yield { - type: 'TEXT_MESSAGE_END', - messageId: aguiState.messageId, - model: chunk.model || options.model, - timestamp, - } - } - - // Emit AG-UI RUN_FINISHED - yield { - type: 'RUN_FINISHED', - runId: aguiState.runId, - model: chunk.model || options.model, - timestamp, - usage: chunk.usage - ? { - promptTokens: chunk.usage.prompt_tokens || 0, - completionTokens: chunk.usage.completion_tokens || 0, - totalTokens: chunk.usage.total_tokens || 0, - } - : undefined, - finishReason: computedFinishReason, - } - } - } - } catch (error: unknown) { - const err = error as Error & { code?: string } - console.log('[Grok Adapter] Stream ended with error:', err.message) - - // Emit AG-UI RUN_ERROR - yield { - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - error: { - message: err.message || 'Unknown error occurred', - code: err.code, - }, - } - } - } - - /** - * Maps common options to Grok-specific Chat Completions format - */ - private mapTextOptionsToGrok( - options: TextOptions, - ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming { - const modelOptions = options.modelOptions as - | Omit< - InternalTextProviderOptions, - 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' - > - | undefined - - if (modelOptions) { - validateTextProviderOptions({ - ...modelOptions, - model: options.model, - }) - } - - const tools = options.tools - ? 
convertToolsToProviderFormat(options.tools) - : undefined - - // Build messages array with system prompts - const messages: Array = - [] - - // Add system prompts first - if (options.systemPrompts && options.systemPrompts.length > 0) { - messages.push({ - role: 'system', - content: options.systemPrompts.join('\n'), - }) - } - - // Convert messages - for (const message of options.messages) { - messages.push(this.convertMessageToGrok(message)) - } - - return { - model: options.model, - messages, - temperature: options.temperature, - max_tokens: options.maxTokens, - top_p: options.topP, - tools: tools as Array, - stream: true, - stream_options: { include_usage: true }, - } - } - - private convertMessageToGrok( - message: ModelMessage, - ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam { - // Handle tool messages - if (message.role === 'tool') { - return { - role: 'tool', - tool_call_id: message.toolCallId || '', - content: - typeof message.content === 'string' - ? message.content - : JSON.stringify(message.content), - } - } - - // Handle assistant messages - if (message.role === 'assistant') { - const toolCalls = message.toolCalls?.map((tc) => ({ - id: tc.id, - type: 'function' as const, - function: { - name: tc.function.name, - arguments: - typeof tc.function.arguments === 'string' - ? tc.function.arguments - : JSON.stringify(tc.function.arguments), - }, - })) - - return { - role: 'assistant', - content: this.extractTextContent(message.content), - ...(toolCalls && toolCalls.length > 0 ? 
{ tool_calls: toolCalls } : {}), - } - } - - // Handle user messages - support multimodal content - const contentParts = this.normalizeContent(message.content) - - // If only text, use simple string format - if (contentParts.length === 1 && contentParts[0]?.type === 'text') { - return { - role: 'user', - content: contentParts[0].content, - } - } - - // Otherwise, use array format for multimodal - const parts: Array = - [] - for (const part of contentParts) { - if (part.type === 'text') { - parts.push({ type: 'text', text: part.content }) - } else if (part.type === 'image') { - const imageMetadata = part.metadata as GrokImageMetadata | undefined - // For base64 data, construct a data URI using the mimeType from source - const imageValue = part.source.value - const imageUrl = - part.source.type === 'data' && !imageValue.startsWith('data:') - ? `data:${part.source.mimeType};base64,${imageValue}` - : imageValue - parts.push({ - type: 'image_url', - image_url: { - url: imageUrl, - detail: imageMetadata?.detail || 'auto', - }, - }) - } - } - - return { - role: 'user', - content: parts.length > 0 ? parts : '', - } - } - - /** - * Normalizes message content to an array of ContentPart. - * Handles backward compatibility with string content. - */ - private normalizeContent( - content: string | null | Array, - ): Array { - if (content === null) { - return [] - } - if (typeof content === 'string') { - return [{ type: 'text', content: content }] - } - return content - } - - /** - * Extracts text content from a content value that may be string, null, or ContentPart array. 
- */ - private extractTextContent( - content: string | null | Array, - ): string { - if (content === null) { - return '' - } - if (typeof content === 'string') { - return content - } - // It's an array of ContentPart - return content - .filter((p) => p.type === 'text') - .map((p) => p.content) - .join('') + super(toCompatibleConfig(config), model, 'grok') } } diff --git a/packages/typescript/ai-grok/src/tools/function-tool.ts b/packages/typescript/ai-grok/src/tools/function-tool.ts index 646fb8953..35e66ff23 100644 --- a/packages/typescript/ai-grok/src/tools/function-tool.ts +++ b/packages/typescript/ai-grok/src/tools/function-tool.ts @@ -1,45 +1,4 @@ -import { makeGrokStructuredOutputCompatible } from '../utils/schema-converter' -import type { JSONSchema, Tool } from '@tanstack/ai' -import type OpenAI from 'openai' - -// Use Chat Completions API tool format (not Responses API) -export type FunctionTool = OpenAI.Chat.Completions.ChatCompletionTool - -/** - * Converts a standard Tool to Grok ChatCompletionTool format. - * - * Tool schemas are already converted to JSON Schema in the ai layer. - * We apply Grok-specific transformations for strict mode: - * - All properties in required array - * - Optional fields made nullable - * - additionalProperties: false - * - * This enables strict mode for all tools automatically. - */ -export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { - // Tool schemas are already converted to JSON Schema in the ai layer - // Apply Grok-specific transformations for strict mode - const inputSchema = (tool.inputSchema ?? 
{ - type: 'object', - properties: {}, - required: [], - }) as JSONSchema - - const jsonSchema = makeGrokStructuredOutputCompatible( - inputSchema, - inputSchema.required || [], - ) - - // Ensure additionalProperties is false for strict mode - jsonSchema.additionalProperties = false - - return { - type: 'function', - function: { - name: tool.name, - description: tool.description, - parameters: jsonSchema, - strict: true, // Always use strict mode since our schema converter handles the requirements - }, - } satisfies FunctionTool -} +export { + convertFunctionToolToChatCompletionsFormat as convertFunctionToolToAdapterFormat, + type ChatCompletionFunctionTool as FunctionTool, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-grok/src/tools/tool-converter.ts b/packages/typescript/ai-grok/src/tools/tool-converter.ts index 969fdb72d..2b3c58a1f 100644 --- a/packages/typescript/ai-grok/src/tools/tool-converter.ts +++ b/packages/typescript/ai-grok/src/tools/tool-converter.ts @@ -1,17 +1,3 @@ -import { convertFunctionToolToAdapterFormat } from './function-tool' -import type { FunctionTool } from './function-tool' -import type { Tool } from '@tanstack/ai' - -/** - * Converts an array of standard Tools to Grok-specific format - * Grok uses OpenAI-compatible API, so we primarily support function tools - */ -export function convertToolsToProviderFormat( - tools: Array, -): Array { - return tools.map((tool) => { - // For Grok, all tools are converted as function tools - // Grok uses OpenAI-compatible API which primarily supports function tools - return convertFunctionToolToAdapterFormat(tool) - }) -} +export { + convertToolsToChatCompletionsFormat as convertToolsToProviderFormat, +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts index 54f70eafe..c0837295c 100644 --- a/packages/typescript/ai-grok/src/utils/client.ts +++ b/packages/typescript/ai-grok/src/utils/client.ts @@ 
-1,46 +1,34 @@ -import OpenAI_SDK from 'openai' +import { getApiKeyFromEnv } from '@tanstack/ai-utils' +import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base' import type { ClientOptions } from 'openai' export interface GrokClientConfig extends ClientOptions { apiKey: string } -/** - * Creates a Grok SDK client instance using OpenAI SDK with xAI's base URL - */ -export function createGrokClient(config: GrokClientConfig): OpenAI_SDK { - return new OpenAI_SDK({ - ...config, - apiKey: config.apiKey, - baseURL: config.baseURL || 'https://api.x.ai/v1', - }) -} - /** * Gets Grok API key from environment variables * @throws Error if XAI_API_KEY is not found */ export function getGrokApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? process.env - : undefined - const key = env?.XAI_API_KEY - - if (!key) { + try { + return getApiKeyFromEnv('XAI_API_KEY') + } catch { throw new Error( 'XAI_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', ) } - - return key } /** - * Generates a unique ID with a prefix + * Converts a GrokClientConfig to OpenAICompatibleClientConfig. + * Sets the default xAI base URL if not already set. 
*/ -export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +export function toCompatibleConfig( + config: GrokClientConfig, +): OpenAICompatibleClientConfig { + return { + ...config, + baseURL: config.baseURL || 'https://api.x.ai/v1', + } as unknown as OpenAICompatibleClientConfig } diff --git a/packages/typescript/ai-grok/src/utils/index.ts b/packages/typescript/ai-grok/src/utils/index.ts index 72c2f529f..15ec9e854 100644 --- a/packages/typescript/ai-grok/src/utils/index.ts +++ b/packages/typescript/ai-grok/src/utils/index.ts @@ -1,7 +1,6 @@ export { - createGrokClient, getGrokApiKeyFromEnv, - generateId, + toCompatibleConfig, type GrokClientConfig, } from './client' export { diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts index 38c345e22..20c2d36d3 100644 --- a/packages/typescript/ai-grok/src/utils/schema-converter.ts +++ b/packages/typescript/ai-grok/src/utils/schema-converter.ts @@ -1,110 +1,2 @@ -/** - * Recursively transform null values to undefined in an object. - * - * This is needed because Grok's structured output (via OpenAI-compatible API) requires all fields to be - * in the `required` array, with optional fields made nullable (type: ["string", "null"]). - * When Grok returns null for optional fields, we need to convert them back to - * undefined to match the original Zod schema expectations. 
- * - * @param obj - Object to transform - * @returns Object with nulls converted to undefined - */ -export function transformNullsToUndefined(obj: T): T { - if (obj === null) { - return undefined as unknown as T - } - - if (Array.isArray(obj)) { - return obj.map((item) => transformNullsToUndefined(item)) as unknown as T - } - - if (typeof obj === 'object') { - const result: Record = {} - for (const [key, value] of Object.entries(obj as Record)) { - const transformed = transformNullsToUndefined(value) - // Only include the key if the value is not undefined - // This makes { notes: null } become {} (field absent) instead of { notes: undefined } - if (transformed !== undefined) { - result[key] = transformed - } - } - return result as T - } - - return obj -} - -/** - * Transform a JSON schema to be compatible with Grok's structured output requirements (OpenAI-compatible). - * Grok requires: - * - All properties must be in the `required` array - * - Optional fields should have null added to their type union - * - additionalProperties must be false for objects - * - * @param schema - JSON schema to transform - * @param originalRequired - Original required array (to know which fields were optional) - * @returns Transformed schema compatible with Grok structured output - */ -export function makeGrokStructuredOutputCompatible( - schema: Record, - originalRequired: Array = [], -): Record { - const result = { ...schema } - - // Handle object types - if (result.type === 'object' && result.properties) { - const properties = { ...result.properties } - const allPropertyNames = Object.keys(properties) - - // Transform each property - for (const propName of allPropertyNames) { - const prop = properties[propName] - const wasOptional = !originalRequired.includes(propName) - - // Recursively transform nested objects/arrays - if (prop.type === 'object' && prop.properties) { - properties[propName] = makeGrokStructuredOutputCompatible( - prop, - prop.required || [], - ) - } else if 
(prop.type === 'array' && prop.items) { - properties[propName] = { - ...prop, - items: makeGrokStructuredOutputCompatible( - prop.items, - prop.items.required || [], - ), - } - } else if (wasOptional) { - // Make optional fields nullable by adding null to the type - if (prop.type && !Array.isArray(prop.type)) { - properties[propName] = { - ...prop, - type: [prop.type, 'null'], - } - } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { - properties[propName] = { - ...prop, - type: [...prop.type, 'null'], - } - } - } - } - - result.properties = properties - // ALL properties must be required for Grok structured output - result.required = allPropertyNames - // additionalProperties must be false - result.additionalProperties = false - } - - // Handle array types with object items - if (result.type === 'array' && result.items) { - result.items = makeGrokStructuredOutputCompatible( - result.items, - result.items.required || [], - ) - } - - return result -} +export { transformNullsToUndefined } from '@tanstack/ai-utils' +export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts index 14e3e57c7..93f4d230f 100644 --- a/packages/typescript/ai-grok/tests/grok-adapter.test.ts +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -4,22 +4,6 @@ import { createGrokImage, grokImage } from '../src/adapters/image' import { createGrokSummarize, grokSummarize } from '../src/adapters/summarize' import type { StreamChunk, Tool } from '@tanstack/ai' -// Declare mockCreate at module level -let mockCreate: ReturnType - -// Mock the OpenAI SDK -vi.mock('openai', () => { - return { - default: class { - chat = { - completions: { - create: (...args: Array) => mockCreate(...args), - }, - } - }, - } -}) - // Helper to create async iterable from chunks function createAsyncIterable(chunks: Array): 
AsyncIterable { return { @@ -37,17 +21,26 @@ function createAsyncIterable(chunks: Array): AsyncIterable { } } -// Helper to setup the mock SDK client for streaming responses -function setupMockSdkClient( +// Helper to create a mock OpenAI client and inject it into an adapter +function injectMockClient( + adapter: object, streamChunks: Array>, nonStreamResponse?: Record, -) { - mockCreate = vi.fn().mockImplementation((params) => { +): ReturnType { + const mockCreate = vi.fn().mockImplementation((params) => { if (params.stream) { return Promise.resolve(createAsyncIterable(streamChunks)) } return Promise.resolve(nonStreamResponse) }) + ;(adapter as any).client = { + chat: { + completions: { + create: mockCreate, + }, + }, + } + return mockCreate } const weatherTool: Tool = { @@ -188,8 +181,8 @@ describe('Grok AG-UI event emission', () => { }, ] - setupMockSdkClient(streamChunks) const adapter = createGrokText('grok-3', 'test-api-key') + injectMockClient(adapter, streamChunks) const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -235,8 +228,8 @@ describe('Grok AG-UI event emission', () => { }, ] - setupMockSdkClient(streamChunks) const adapter = createGrokText('grok-3', 'test-api-key') + injectMockClient(adapter, streamChunks) const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -293,8 +286,8 @@ describe('Grok AG-UI event emission', () => { }, ] - setupMockSdkClient(streamChunks) const adapter = createGrokText('grok-3', 'test-api-key') + injectMockClient(adapter, streamChunks) const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -383,8 +376,8 @@ describe('Grok AG-UI event emission', () => { }, ] - setupMockSdkClient(streamChunks) const adapter = createGrokText('grok-3', 'test-api-key') + injectMockClient(adapter, streamChunks) const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -450,9 +443,16 @@ describe('Grok AG-UI event emission', () => { }, } - mockCreate = 
vi.fn().mockResolvedValue(errorIterable) - const adapter = createGrokText('grok-3', 'test-api-key') + const mockCreate = vi.fn().mockResolvedValue(errorIterable) + ;(adapter as any).client = { + chat: { + completions: { + create: mockCreate, + }, + }, + } + const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -499,8 +499,8 @@ describe('Grok AG-UI event emission', () => { }, ] - setupMockSdkClient(streamChunks) const adapter = createGrokText('grok-3', 'test-api-key') + injectMockClient(adapter, streamChunks) const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -575,8 +575,8 @@ describe('Grok AG-UI event emission', () => { }, ] - setupMockSdkClient(streamChunks) const adapter = createGrokText('grok-3', 'test-api-key') + injectMockClient(adapter, streamChunks) const chunks: Array = [] for await (const chunk of adapter.chatStream({ From a74d5430e21c8fe28ea2c6cc760e1ba9c647d1de Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 14:33:03 +0200 Subject: [PATCH 09/15] refactor: migrate ai-groq, ai-openrouter, ai-ollama to shared utilities Replace duplicated generateId, env-var lookup, and null-transform logic in ai-groq, ai-openrouter, and ai-ollama with imports from @tanstack/ai-utils and @tanstack/openai-base. makeGroqStructuredOutputCompatible now delegates to makeStructuredOutputCompatible and applies the Groq-specific quirk of removing empty required arrays. 
--- packages/typescript/ai-groq/package.json | 2 + .../typescript/ai-groq/src/utils/client.ts | 19 +-- .../ai-groq/src/utils/schema-converter.ts | 122 ++++++------------ .../ai-groq/tests/groq-adapter.test.ts | 2 +- packages/typescript/ai-ollama/package.json | 1 + .../typescript/ai-ollama/src/utils/client.ts | 3 +- .../typescript/ai-openrouter/package.json | 3 +- .../ai-openrouter/src/utils/client.ts | 36 +----- packages/typescript/ai-utils/src/env.ts | 2 +- .../ai-utils/src/model-meta/define.ts | 19 +-- .../typescript/ai-utils/tests/env.test.ts | 4 +- .../ai-utils/tests/model-meta.test.ts | 10 +- .../ai-utils/tests/transforms.test.ts | 5 +- packages/typescript/openai-base/package.json | 13 +- .../src/adapters/chat-completions-text.ts | 9 +- .../src/adapters/responses-text.ts | 21 ++- .../openai-base/src/adapters/summarize.ts | 4 +- .../openai-base/src/adapters/transcription.ts | 4 +- .../openai-base/src/adapters/video.ts | 3 +- .../openai-base/src/types/provider-options.ts | 6 +- .../openai-base/src/utils/client.ts | 4 +- .../tests/chat-completions-text.test.ts | 18 ++- .../openai-base/tests/responses-text.test.ts | 10 +- pnpm-lock.yaml | 18 +++ 24 files changed, 145 insertions(+), 193 deletions(-) diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index 3ae995c18..06a70ba64 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -47,6 +47,8 @@ "zod": "^4.0.0" }, "dependencies": { + "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "groq-sdk": "^0.37.0" } } diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts index f143193d2..29ab0fb31 100644 --- a/packages/typescript/ai-groq/src/utils/client.ts +++ b/packages/typescript/ai-groq/src/utils/client.ts @@ -1,5 +1,6 @@ import Groq_SDK from 'groq-sdk' import type { ClientOptions } from 'groq-sdk' +import { generateId as _generateId, 
getApiKeyFromEnv } from '@tanstack/ai-utils' export interface GroqClientConfig extends ClientOptions { apiKey: string @@ -17,26 +18,12 @@ export function createGroqClient(config: GroqClientConfig): Groq_SDK { * @throws Error if GROQ_API_KEY is not found */ export function getGroqApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? process.env - : undefined - const key = env?.GROQ_API_KEY - - if (!key) { - throw new Error( - 'GROQ_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', - ) - } - - return key + return getApiKeyFromEnv('GROQ_API_KEY') } /** * Generates a unique ID with a prefix */ export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) } diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts index d0a57cf44..2841c83c8 100644 --- a/packages/typescript/ai-groq/src/utils/schema-converter.ts +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -1,35 +1,38 @@ +import { makeStructuredOutputCompatible } from '@tanstack/openai-base' +import { transformNullsToUndefined } from '@tanstack/ai-utils' + +export { transformNullsToUndefined } + /** - * Recursively transform null values to undefined in an object. - * - * This is needed because Groq's structured output requires all fields to be - * in the `required` array, with optional fields made nullable (type: ["string", "null"]). - * When Groq returns null for optional fields, we need to convert them back to - * undefined to match the original Zod schema expectations. - * - * @param obj - Object to transform - * @returns Object with nulls converted to undefined + * Recursively removes `required: []` from a schema object. 
+ * Groq rejects `required` when it is an empty array, even though + * OpenAI-compatible schemas allow it. */ -export function transformNullsToUndefined(obj: T): T { - if (obj === null) { - return undefined as unknown as T - } +function removeEmptyRequired(schema: Record): Record { + const result = { ...schema } - if (Array.isArray(obj)) { - return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + if (Array.isArray(result.required) && result.required.length === 0) { + delete result.required } - if (typeof obj === 'object') { - const result: Record = {} - for (const [key, value] of Object.entries(obj as Record)) { - const transformed = transformNullsToUndefined(value) - if (transformed !== undefined) { - result[key] = transformed - } + if (result.properties && typeof result.properties === 'object') { + const properties: Record = {} + for (const [key, value] of Object.entries( + result.properties as Record, + )) { + properties[key] = + typeof value === 'object' && value !== null && !Array.isArray(value) + ? removeEmptyRequired(value) + : value } - return result as T + result.properties = properties + } + + if (result.items && typeof result.items === 'object' && !Array.isArray(result.items)) { + result.items = removeEmptyRequired(result.items) } - return obj + return result } /** @@ -39,6 +42,10 @@ export function transformNullsToUndefined(obj: T): T { * - All properties must be in the `required` array * - Optional fields should have null added to their type union * - additionalProperties must be false for objects + * - `required` must be omitted (not empty array) when there are no properties + * + * Delegates to the shared OpenAI-compatible transformer and applies the + * Groq-specific quirk of removing empty `required` arrays. 
* * @param schema - JSON schema to transform * @param originalRequired - Original required array (to know which fields were optional) @@ -48,63 +55,16 @@ export function makeGroqStructuredOutputCompatible( schema: Record, originalRequired: Array = [], ): Record { - const result = { ...schema } + // Ensure object schemas always have properties (e.g. z.object({}) may produce + // { type: 'object' } without properties). openai-base's transformer skips + // objects without properties, so we normalise first. + const normalised = + schema.type === 'object' && !schema.properties + ? { ...schema, properties: {} } + : schema - if (result.type === 'object') { - if (!result.properties) { - result.properties = {} - } - const properties = { ...result.properties } - const allPropertyNames = Object.keys(properties) - - for (const propName of allPropertyNames) { - const prop = properties[propName] - const wasOptional = !originalRequired.includes(propName) - - if (prop.type === 'object' && prop.properties) { - properties[propName] = makeGroqStructuredOutputCompatible( - prop, - prop.required || [], - ) - } else if (prop.type === 'array' && prop.items) { - properties[propName] = { - ...prop, - items: makeGroqStructuredOutputCompatible( - prop.items, - prop.items.required || [], - ), - } - } else if (wasOptional) { - if (prop.type && !Array.isArray(prop.type)) { - properties[propName] = { - ...prop, - type: [prop.type, 'null'], - } - } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { - properties[propName] = { - ...prop, - type: [...prop.type, 'null'], - } - } - } - } + const result = makeStructuredOutputCompatible(normalised, originalRequired) - result.properties = properties - // Groq rejects `required` when there are no properties, even if it's an empty array - if (allPropertyNames.length > 0) { - result.required = allPropertyNames - } else { - delete result.required - } - result.additionalProperties = false - } - - if (result.type === 'array' && result.items) { 
- result.items = makeGroqStructuredOutputCompatible( - result.items, - result.items.required || [], - ) - } - - return result + // Groq rejects `required` when it is an empty array + return removeEmptyRequired(result) } diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index 1562b0623..da9b939c0 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -90,7 +90,7 @@ describe('Groq adapters', () => { vi.stubEnv('GROQ_API_KEY', '') expect(() => groqText('llama-3.3-70b-versatile')).toThrow( - 'GROQ_API_KEY is required', + 'GROQ_API_KEY', ) }) diff --git a/packages/typescript/ai-ollama/package.json b/packages/typescript/ai-ollama/package.json index 4aca7c929..763626ee9 100644 --- a/packages/typescript/ai-ollama/package.json +++ b/packages/typescript/ai-ollama/package.json @@ -41,6 +41,7 @@ "adapter" ], "dependencies": { + "@tanstack/ai-utils": "workspace:*", "ollama": "^0.6.3" }, "peerDependencies": { diff --git a/packages/typescript/ai-ollama/src/utils/client.ts b/packages/typescript/ai-ollama/src/utils/client.ts index dc5cd9276..e55d6d9de 100644 --- a/packages/typescript/ai-ollama/src/utils/client.ts +++ b/packages/typescript/ai-ollama/src/utils/client.ts @@ -1,4 +1,5 @@ import { Ollama } from 'ollama' +import { generateId as _generateId } from '@tanstack/ai-utils' export interface OllamaClientConfig { host?: string @@ -37,7 +38,7 @@ export function getOllamaHostFromEnv(): string { * Generates a unique ID with a prefix */ export function generateId(prefix: string = 'msg'): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) } /** diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 3923121a8..6bc13998d 100644 --- a/packages/typescript/ai-openrouter/package.json +++ 
b/packages/typescript/ai-openrouter/package.json @@ -40,7 +40,8 @@ ], "dependencies": { "@openrouter/sdk": "0.3.15", - "@tanstack/ai": "workspace:*" + "@tanstack/ai": "workspace:*", + "@tanstack/ai-utils": "workspace:*" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", diff --git a/packages/typescript/ai-openrouter/src/utils/client.ts b/packages/typescript/ai-openrouter/src/utils/client.ts index 758416993..04522c5f9 100644 --- a/packages/typescript/ai-openrouter/src/utils/client.ts +++ b/packages/typescript/ai-openrouter/src/utils/client.ts @@ -1,3 +1,5 @@ +import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' + export interface OpenRouterClientConfig { apiKey: string baseURL?: string @@ -5,42 +7,12 @@ export interface OpenRouterClientConfig { xTitle?: string } -interface EnvObject { - OPENROUTER_API_KEY?: string -} - -interface WindowWithEnv { - env?: EnvObject -} - -function getEnvironment(): EnvObject | undefined { - if (typeof globalThis !== 'undefined') { - const win = (globalThis as { window?: WindowWithEnv }).window - if (win?.env) { - return win.env - } - } - if (typeof process !== 'undefined') { - return process.env as EnvObject - } - return undefined -} - export function getOpenRouterApiKeyFromEnv(): string { - const env = getEnvironment() - const key = env?.OPENROUTER_API_KEY - - if (!key) { - throw new Error( - 'OPENROUTER_API_KEY is required. 
Please set it in your environment variables or use the factory function with an explicit API key.', - ) - } - - return key + return getApiKeyFromEnv('OPENROUTER_API_KEY') } export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) } export function buildHeaders( diff --git a/packages/typescript/ai-utils/src/env.ts b/packages/typescript/ai-utils/src/env.ts index 483ca6ba6..57af69c52 100644 --- a/packages/typescript/ai-utils/src/env.ts +++ b/packages/typescript/ai-utils/src/env.ts @@ -10,7 +10,7 @@ export function getApiKeyFromEnv(envVarName: string): string { if (!apiKey) { throw new Error( - `${envVarName} is not set. Please set the ${envVarName} environment variable or pass the API key directly.` + `${envVarName} is not set. Please set the ${envVarName} environment variable or pass the API key directly.`, ) } diff --git a/packages/typescript/ai-utils/src/model-meta/define.ts b/packages/typescript/ai-utils/src/model-meta/define.ts index a87fb73ee..8ce70b4b2 100644 --- a/packages/typescript/ai-utils/src/model-meta/define.ts +++ b/packages/typescript/ai-utils/src/model-meta/define.ts @@ -3,42 +3,45 @@ import type { ModelMeta } from './types' export function defineModelMeta(meta: T): T { if (meta.supports.input.length === 0) { throw new Error( - `defineModelMeta: model "${meta.name}" must have at least one input modality` + `defineModelMeta: model "${meta.name}" must have at least one input modality`, ) } if (meta.supports.output.length === 0) { throw new Error( - `defineModelMeta: model "${meta.name}" must have at least one output modality` + `defineModelMeta: model "${meta.name}" must have at least one output modality`, ) } if (meta.context_window !== undefined && meta.context_window <= 0) { throw new Error( - `defineModelMeta: model "${meta.name}" context_window must be positive` + `defineModelMeta: model "${meta.name}" context_window must be positive`, ) } if 
(meta.max_output_tokens !== undefined && meta.max_output_tokens <= 0) { throw new Error( - `defineModelMeta: model "${meta.name}" max_output_tokens must be positive` + `defineModelMeta: model "${meta.name}" max_output_tokens must be positive`, ) } if (meta.pricing) { if (meta.pricing.input.normal < 0) { throw new Error( - `defineModelMeta: model "${meta.name}" pricing.input.normal must be non-negative` + `defineModelMeta: model "${meta.name}" pricing.input.normal must be non-negative`, ) } - if (meta.pricing.input.cached !== undefined && meta.pricing.input.cached < 0) { + if ( + meta.pricing.input.cached !== undefined && + meta.pricing.input.cached < 0 + ) { throw new Error( - `defineModelMeta: model "${meta.name}" pricing.input.cached must be non-negative` + `defineModelMeta: model "${meta.name}" pricing.input.cached must be non-negative`, ) } if (meta.pricing.output.normal < 0) { throw new Error( - `defineModelMeta: model "${meta.name}" pricing.output.normal must be non-negative` + `defineModelMeta: model "${meta.name}" pricing.output.normal must be non-negative`, ) } } diff --git a/packages/typescript/ai-utils/tests/env.test.ts b/packages/typescript/ai-utils/tests/env.test.ts index 24c6cffd9..ae9de001d 100644 --- a/packages/typescript/ai-utils/tests/env.test.ts +++ b/packages/typescript/ai-utils/tests/env.test.ts @@ -21,6 +21,8 @@ describe('getApiKeyFromEnv', () => { }) it('should include the env var name in the error message', () => { - expect(() => getApiKeyFromEnv('MY_PROVIDER_API_KEY')).toThrow('MY_PROVIDER_API_KEY') + expect(() => getApiKeyFromEnv('MY_PROVIDER_API_KEY')).toThrow( + 'MY_PROVIDER_API_KEY', + ) }) }) diff --git a/packages/typescript/ai-utils/tests/model-meta.test.ts b/packages/typescript/ai-utils/tests/model-meta.test.ts index 94e9cf219..30c6e9404 100644 --- a/packages/typescript/ai-utils/tests/model-meta.test.ts +++ b/packages/typescript/ai-utils/tests/model-meta.test.ts @@ -47,7 +47,7 @@ describe('defineModelMeta', () => { input: { normal: 
-1 }, output: { normal: 1 }, }, - }) + }), ).toThrow('pricing') }) @@ -60,7 +60,7 @@ describe('defineModelMeta', () => { output: ['text'] as Array, }, context_window: 0, - }) + }), ).toThrow('context_window') }) @@ -72,7 +72,7 @@ describe('defineModelMeta', () => { input: [] as Array, output: ['text'] as Array, }, - }) + }), ).toThrow('input') }) @@ -84,7 +84,7 @@ describe('defineModelMeta', () => { input: ['text'] as Array, output: [] as Array, }, - }) + }), ).toThrow('output') }) @@ -100,7 +100,7 @@ describe('defineModelMeta', () => { input: { normal: 1 }, output: { normal: -1 }, }, - }) + }), ).toThrow('pricing') }) }) diff --git a/packages/typescript/ai-utils/tests/transforms.test.ts b/packages/typescript/ai-utils/tests/transforms.test.ts index d37ba2e80..8ce65c1b6 100644 --- a/packages/typescript/ai-utils/tests/transforms.test.ts +++ b/packages/typescript/ai-utils/tests/transforms.test.ts @@ -18,7 +18,10 @@ describe('transformNullsToUndefined', () => { it('should handle arrays', () => { const result = transformNullsToUndefined({ - items: [{ a: null, b: 1 }, { a: 'x', b: null }], + items: [ + { a: null, b: 1 }, + { a: 'x', b: null }, + ], }) expect(result).toEqual({ items: [{ b: 1 }, { a: 'x' }], diff --git a/packages/typescript/openai-base/package.json b/packages/typescript/openai-base/package.json index fcc2f3aea..760611214 100644 --- a/packages/typescript/openai-base/package.json +++ b/packages/typescript/openai-base/package.json @@ -18,7 +18,10 @@ "import": "./dist/esm/index.js" } }, - "files": ["dist", "src"], + "files": [ + "dist", + "src" + ], "scripts": { "build": "vite build", "clean": "premove ./build ./dist", @@ -29,7 +32,13 @@ "test:lib:dev": "pnpm test:lib --watch", "test:types": "tsc" }, - "keywords": ["ai", "openai", "tanstack", "adapter", "base"], + "keywords": [ + "ai", + "openai", + "tanstack", + "adapter", + "base" + ], "dependencies": { "@tanstack/ai-utils": "workspace:*", "openai": "^6.9.1" diff --git 
a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index ad3621cf3..3acbab0ad 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -35,8 +35,8 @@ export class OpenAICompatibleChatCompletionsTextAdapter< TModel extends string, TProviderOptions extends Record = Record, TInputModalities extends ReadonlyArray = ReadonlyArray, - TMessageMetadata extends - DefaultMessageMetadataByModality = DefaultMessageMetadataByModality, + TMessageMetadata extends DefaultMessageMetadataByModality = + DefaultMessageMetadataByModality, > extends BaseTextAdapter< TModel, TProviderOptions, @@ -389,10 +389,7 @@ export class OpenAICompatibleChatCompletionsTextAdapter< } } catch (error: unknown) { const err = error as Error & { code?: string } - console.log( - `[${this.name}] Stream ended with error:`, - err.message, - ) + console.log(`[${this.name}] Stream ended with error:`, err.message) // Emit AG-UI RUN_ERROR yield { diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index 2bd3f3c2b..699ee0d38 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -46,8 +46,8 @@ export class OpenAICompatibleResponsesTextAdapter< TModel extends string, TProviderOptions extends Record = Record, TInputModalities extends ReadonlyArray = ReadonlyArray, - TMessageMetadata extends - DefaultMessageMetadataByModality = DefaultMessageMetadataByModality, + TMessageMetadata extends DefaultMessageMetadataByModality = + DefaultMessageMetadataByModality, > extends BaseTextAdapter< TModel, TProviderOptions, @@ -316,13 +316,11 @@ export class OpenAICompatibleResponsesTextAdapter< } } - const handleContentPart = ( - contentPart: { - type: 
string - text?: string - refusal?: string - }, - ): StreamChunk => { + const handleContentPart = (contentPart: { + type: string + text?: string + refusal?: string + }): StreamChunk => { if (contentPart.type === 'output_text') { accumulatedContent += contentPart.text || '' return { @@ -668,10 +666,7 @@ export class OpenAICompatibleResponsesTextAdapter< } } catch (error: unknown) { const err = error as Error & { code?: string } - console.log( - `[${this.name}] Stream ended with error:`, - err.message, - ) + console.log(`[${this.name}] Stream ended with error:`, err.message) yield { type: 'RUN_ERROR', runId: aguiState.runId, diff --git a/packages/typescript/openai-base/src/adapters/summarize.ts b/packages/typescript/openai-base/src/adapters/summarize.ts index 2fba3cdba..47bcda139 100644 --- a/packages/typescript/openai-base/src/adapters/summarize.ts +++ b/packages/typescript/openai-base/src/adapters/summarize.ts @@ -12,7 +12,9 @@ import type { * text adapter without tight coupling to a specific implementation. 
*/ export interface ChatStreamCapable { - chatStream: (options: TextOptions) => AsyncIterable + chatStream: ( + options: TextOptions, + ) => AsyncIterable } /** diff --git a/packages/typescript/openai-base/src/adapters/transcription.ts b/packages/typescript/openai-base/src/adapters/transcription.ts index 6ae4451e9..14346ecdf 100644 --- a/packages/typescript/openai-base/src/adapters/transcription.ts +++ b/packages/typescript/openai-base/src/adapters/transcription.ts @@ -102,9 +102,7 @@ export class OpenAICompatibleTranscriptionAdapter< } } - protected prepareAudioFile( - audio: string | File | Blob | ArrayBuffer, - ): File { + protected prepareAudioFile(audio: string | File | Blob | ArrayBuffer): File { // If already a File, return it if (typeof File !== 'undefined' && audio instanceof File) { return audio diff --git a/packages/typescript/openai-base/src/adapters/video.ts b/packages/typescript/openai-base/src/adapters/video.ts index 8969e773e..edf688fe3 100644 --- a/packages/typescript/openai-base/src/adapters/video.ts +++ b/packages/typescript/openai-base/src/adapters/video.ts @@ -148,8 +148,7 @@ export class OpenAICompatibleVideoAdapter< } // Fetch and return a data URL - const baseUrl = - this.clientConfig.baseURL || 'https://api.openai.com/v1' + const baseUrl = this.clientConfig.baseURL || 'https://api.openai.com/v1' const apiKey = this.clientConfig.apiKey const contentResponse = await fetch( diff --git a/packages/typescript/openai-base/src/types/provider-options.ts b/packages/typescript/openai-base/src/types/provider-options.ts index 9bef2598c..018482e07 100644 --- a/packages/typescript/openai-base/src/types/provider-options.ts +++ b/packages/typescript/openai-base/src/types/provider-options.ts @@ -29,7 +29,11 @@ export interface OpenAICompatibleStructuredOutputOptions { export interface OpenAICompatibleToolsOptions { max_tool_calls?: number parallel_tool_calls?: boolean - tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string 
} } + tool_choice?: + | 'auto' + | 'none' + | 'required' + | { type: 'function'; function: { name: string } } } export interface OpenAICompatibleStreamingOptions { diff --git a/packages/typescript/openai-base/src/utils/client.ts b/packages/typescript/openai-base/src/utils/client.ts index 3d33148ba..8dd54b2fc 100644 --- a/packages/typescript/openai-base/src/utils/client.ts +++ b/packages/typescript/openai-base/src/utils/client.ts @@ -1,6 +1,8 @@ import OpenAI from 'openai' import type { OpenAICompatibleClientConfig } from '../types/config' -export function createOpenAICompatibleClient(config: OpenAICompatibleClientConfig): OpenAI { +export function createOpenAICompatibleClient( + config: OpenAICompatibleClientConfig, +): OpenAI { return new OpenAI(config) } diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts index 8c95bcd2b..3674493cf 100644 --- a/packages/typescript/openai-base/tests/chat-completions-text.test.ts +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -625,9 +625,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { const result = await adapter.structuredOutput({ chatOptions: { model: 'test-model', - messages: [ - { role: 'user', content: 'Give me a person object' }, - ], + messages: [{ role: 'user', content: 'Give me a person object' }], }, outputSchema: { type: 'object', @@ -674,9 +672,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { const result = await adapter.structuredOutput({ chatOptions: { model: 'test-model', - messages: [ - { role: 'user', content: 'Give me a person object' }, - ], + messages: [{ role: 'user', content: 'Give me a person object' }], }, outputSchema: { type: 'object', @@ -715,9 +711,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { adapter.structuredOutput({ chatOptions: { model: 'test-model', - messages: [ - { role: 'user', content: 'Give me a person 
object' }, - ], + messages: [{ role: 'user', content: 'Give me a person object' }], }, outputSchema: { type: 'object', @@ -735,7 +729,11 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { it('allows subclassing with custom name', () => { class MyProviderAdapter extends OpenAICompatibleChatCompletionsTextAdapter { constructor(apiKey: string, model: string) { - super({ apiKey, baseURL: 'https://my-provider.com/v1' }, model, 'my-provider') + super( + { apiKey, baseURL: 'https://my-provider.com/v1' }, + model, + 'my-provider', + ) } } diff --git a/packages/typescript/openai-base/tests/responses-text.test.ts b/packages/typescript/openai-base/tests/responses-text.test.ts index 4e690ad37..2c78cedb2 100644 --- a/packages/typescript/openai-base/tests/responses-text.test.ts +++ b/packages/typescript/openai-base/tests/responses-text.test.ts @@ -1059,8 +1059,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { expect(errorChunks.length).toBeGreaterThan(0) const incompleteError = errorChunks.find( (c) => - c.type === 'RUN_ERROR' && - c.error.message === 'max_output_tokens', + c.type === 'RUN_ERROR' && c.error.message === 'max_output_tokens', ) expect(incompleteError).toBeDefined() }) @@ -1097,7 +1096,8 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { } const errorChunk = chunks.find( - (c) => c.type === 'RUN_ERROR' && c.error.message === 'Rate limit exceeded', + (c) => + c.type === 'RUN_ERROR' && c.error.message === 'Rate limit exceeded', ) expect(errorChunk).toBeDefined() if (errorChunk?.type === 'RUN_ERROR') { @@ -1231,9 +1231,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { adapter.structuredOutput({ chatOptions: { model: 'test-model', - messages: [ - { role: 'user', content: 'Give me a person object' }, - ], + messages: [{ role: 'user', content: 'Give me a person object' }], }, outputSchema: { type: 'object', diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c64feb58e..db8514d16 100644 --- a/pnpm-lock.yaml +++ 
b/pnpm-lock.yaml @@ -835,6 +835,12 @@ importers: '@tanstack/ai': specifier: workspace:^ version: link:../ai + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base openai: specifier: ^6.9.1 version: 6.10.0(ws@8.18.3)(zod@4.2.1) @@ -854,6 +860,12 @@ importers: '@tanstack/ai': specifier: workspace:^ version: link:../ai + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base groq-sdk: specifier: ^0.37.0 version: 0.37.0 @@ -870,6 +882,9 @@ importers: packages/typescript/ai-ollama: dependencies: + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils ollama: specifier: ^0.6.3 version: 0.6.3 @@ -920,6 +935,9 @@ importers: '@tanstack/ai': specifier: workspace:* version: link:../ai + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 From 0e4e3bced42d35c15d174dba879298689f567c38 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 14:37:21 +0200 Subject: [PATCH 10/15] style: format files with prettier --- packages/typescript/ai-grok/src/adapters/summarize.ts | 5 ++++- packages/typescript/ai-grok/src/adapters/text.ts | 4 +--- packages/typescript/ai-grok/src/tools/tool-converter.ts | 4 +--- packages/typescript/ai-groq/src/utils/schema-converter.ts | 6 +++++- packages/typescript/ai-groq/tests/groq-adapter.test.ts | 4 +--- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index d84796ca5..f13984bac 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -31,7 +31,10 @@ export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number] */ export class 
GrokSummarizeAdapter< TModel extends GrokSummarizeModel, -> extends OpenAICompatibleSummarizeAdapter { +> extends OpenAICompatibleSummarizeAdapter< + TModel, + GrokSummarizeProviderOptions +> { readonly kind = 'summarize' as const readonly name = 'grok' as const diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index 59e3f933d..23c9fd957 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -5,9 +5,7 @@ import type { ResolveInputModalities, ResolveProviderOptions, } from '../model-meta' -import type { - GrokMessageMetadataByModality, -} from '../message-types' +import type { GrokMessageMetadataByModality } from '../message-types' import type { GrokClientConfig } from '../utils' /** diff --git a/packages/typescript/ai-grok/src/tools/tool-converter.ts b/packages/typescript/ai-grok/src/tools/tool-converter.ts index 2b3c58a1f..315f4e638 100644 --- a/packages/typescript/ai-grok/src/tools/tool-converter.ts +++ b/packages/typescript/ai-grok/src/tools/tool-converter.ts @@ -1,3 +1 @@ -export { - convertToolsToChatCompletionsFormat as convertToolsToProviderFormat, -} from '@tanstack/openai-base' +export { convertToolsToChatCompletionsFormat as convertToolsToProviderFormat } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts index 2841c83c8..366d231d5 100644 --- a/packages/typescript/ai-groq/src/utils/schema-converter.ts +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -28,7 +28,11 @@ function removeEmptyRequired(schema: Record): Record { result.properties = properties } - if (result.items && typeof result.items === 'object' && !Array.isArray(result.items)) { + if ( + result.items && + typeof result.items === 'object' && + !Array.isArray(result.items) + ) { result.items = removeEmptyRequired(result.items) } diff --git 
a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index da9b939c0..55d9bb6a2 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -89,9 +89,7 @@ describe('Groq adapters', () => { it('throws if GROQ_API_KEY is not set when using groqText', () => { vi.stubEnv('GROQ_API_KEY', '') - expect(() => groqText('llama-3.3-70b-versatile')).toThrow( - 'GROQ_API_KEY', - ) + expect(() => groqText('llama-3.3-70b-versatile')).toThrow('GROQ_API_KEY') }) it('allows custom baseURL override', () => { From 8f778b17751a837761dd7ce39858bf85cc647623 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 14:46:33 +0200 Subject: [PATCH 11/15] refactor: migrate ai-anthropic, ai-gemini, ai-fal, ai-elevenlabs to @tanstack/ai-utils Replace duplicated generateId and getXxxApiKeyFromEnv implementations in ai-anthropic, ai-gemini, ai-fal, and ai-elevenlabs with imports from @tanstack/ai-utils. All provider-specific wrapper function names preserved for backwards compatibility. 
--- packages/typescript/ai-anthropic/package.json | 4 +- .../ai-anthropic/src/utils/client.ts | 19 +- .../typescript/ai-elevenlabs/package.json | 4 +- .../ai-elevenlabs/src/realtime/token.ts | 21 +- packages/typescript/ai-fal/package.json | 4 +- .../typescript/ai-fal/src/utils/client.ts | 35 +- packages/typescript/ai-gemini/package.json | 4 +- .../typescript/ai-gemini/src/utils/client.ts | 21 +- pnpm-lock.yaml | 419 ++++-------------- 9 files changed, 118 insertions(+), 413 deletions(-) diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index cb80ff13a..cf61a56d9 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -40,7 +40,8 @@ "test:types": "tsc" }, "dependencies": { - "@anthropic-ai/sdk": "^0.71.2" + "@anthropic-ai/sdk": "^0.71.2", + "@tanstack/ai-utils": "workspace:*" }, "peerDependencies": { "@tanstack/ai": "workspace:^", @@ -48,6 +49,7 @@ }, "devDependencies": { "@tanstack/ai": "workspace:*", + "@tanstack/ai-utils": "workspace:*", "@vitest/coverage-v8": "4.0.14", "zod": "^4.2.0" } diff --git a/packages/typescript/ai-anthropic/src/utils/client.ts b/packages/typescript/ai-anthropic/src/utils/client.ts index e42c1255f..d07d2b2af 100644 --- a/packages/typescript/ai-anthropic/src/utils/client.ts +++ b/packages/typescript/ai-anthropic/src/utils/client.ts @@ -1,4 +1,5 @@ import Anthropic_SDK from '@anthropic-ai/sdk' +import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' import type { ClientOptions } from '@anthropic-ai/sdk' export interface AnthropicClientConfig extends ClientOptions { @@ -22,26 +23,12 @@ export function createAnthropicClient( * @throws Error if ANTHROPIC_API_KEY is not found */ export function getAnthropicApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? 
process.env - : undefined - const key = env?.ANTHROPIC_API_KEY - - if (!key) { - throw new Error( - 'ANTHROPIC_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', - ) - } - - return key + return getApiKeyFromEnv('ANTHROPIC_API_KEY') } /** * Generates a unique ID with a prefix */ export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) } diff --git a/packages/typescript/ai-elevenlabs/package.json b/packages/typescript/ai-elevenlabs/package.json index b6f654a8c..a1344e877 100644 --- a/packages/typescript/ai-elevenlabs/package.json +++ b/packages/typescript/ai-elevenlabs/package.json @@ -41,7 +41,8 @@ "test:types": "tsc" }, "dependencies": { - "@11labs/client": "^0.2.0" + "@11labs/client": "^0.2.0", + "@tanstack/ai-utils": "workspace:*" }, "peerDependencies": { "@tanstack/ai": "workspace:^", @@ -50,6 +51,7 @@ "devDependencies": { "@tanstack/ai": "workspace:*", "@tanstack/ai-client": "workspace:*", + "@tanstack/ai-utils": "workspace:*", "@vitest/coverage-v8": "4.0.14" } } diff --git a/packages/typescript/ai-elevenlabs/src/realtime/token.ts b/packages/typescript/ai-elevenlabs/src/realtime/token.ts index 030d0c9a9..e7802bac0 100644 --- a/packages/typescript/ai-elevenlabs/src/realtime/token.ts +++ b/packages/typescript/ai-elevenlabs/src/realtime/token.ts @@ -1,3 +1,4 @@ +import { getApiKeyFromEnv } from '@tanstack/ai-utils' import type { RealtimeToken, RealtimeTokenAdapter } from '@tanstack/ai' import type { ElevenLabsRealtimeTokenOptions } from './types' @@ -7,25 +8,7 @@ const ELEVENLABS_API_URL = 'https://api.elevenlabs.io/v1' * Get ElevenLabs API key from environment */ function getElevenLabsApiKey(): string { - // Check process.env (Node.js) - if (typeof process !== 'undefined' && process.env.ELEVENLABS_API_KEY) { - return process.env.ELEVENLABS_API_KEY - } - - // Check window.env (Browser with 
injected env) - if ( - typeof window !== 'undefined' && - (window as unknown as { env?: { ELEVENLABS_API_KEY?: string } }).env - ?.ELEVENLABS_API_KEY - ) { - return (window as unknown as { env: { ELEVENLABS_API_KEY: string } }).env - .ELEVENLABS_API_KEY - } - - throw new Error( - 'ELEVENLABS_API_KEY not found in environment variables. ' + - 'Please set ELEVENLABS_API_KEY in your environment.', - ) + return getApiKeyFromEnv('ELEVENLABS_API_KEY') } /** diff --git a/packages/typescript/ai-fal/package.json b/packages/typescript/ai-fal/package.json index 3e7d91968..478e08c81 100644 --- a/packages/typescript/ai-fal/package.json +++ b/packages/typescript/ai-fal/package.json @@ -41,10 +41,12 @@ "video-generation" ], "dependencies": { - "@fal-ai/client": "^1.9.4" + "@fal-ai/client": "^1.9.4", + "@tanstack/ai-utils": "workspace:*" }, "devDependencies": { "@tanstack/ai": "workspace:*", + "@tanstack/ai-utils": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7" }, diff --git a/packages/typescript/ai-fal/src/utils/client.ts b/packages/typescript/ai-fal/src/utils/client.ts index ccc788f05..fc3d27ee3 100644 --- a/packages/typescript/ai-fal/src/utils/client.ts +++ b/packages/typescript/ai-fal/src/utils/client.ts @@ -1,42 +1,13 @@ import { fal } from '@fal-ai/client' +import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' export interface FalClientConfig { apiKey: string proxyUrl?: string } -interface EnvObject { - FAL_KEY?: string -} - -interface WindowWithEnv { - env?: EnvObject -} - -function getEnvironment(): EnvObject | undefined { - if (typeof globalThis !== 'undefined') { - const win = (globalThis as { window?: WindowWithEnv }).window - if (win?.env) { - return win.env - } - } - if (typeof process !== 'undefined') { - return process.env as EnvObject - } - return undefined -} - export function getFalApiKeyFromEnv(): string { - const env = getEnvironment() - const key = env?.FAL_KEY - - if (!key) { - throw new Error( - 'FAL_KEY is 
required. Please set it in your environment variables or use the factory function with an explicit API key.', - ) - } - - return key + return getApiKeyFromEnv('FAL_KEY') } export function configureFalClient(config?: FalClientConfig): void { @@ -56,5 +27,5 @@ export function configureFalClient(config?: FalClientConfig): void { } export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) } diff --git a/packages/typescript/ai-gemini/package.json b/packages/typescript/ai-gemini/package.json index 11656a9c1..0eb5a8592 100644 --- a/packages/typescript/ai-gemini/package.json +++ b/packages/typescript/ai-gemini/package.json @@ -40,13 +40,15 @@ "adapter" ], "dependencies": { - "@google/genai": "^1.43.0" + "@google/genai": "^1.43.0", + "@tanstack/ai-utils": "workspace:*" }, "peerDependencies": { "@tanstack/ai": "workspace:^" }, "devDependencies": { "@tanstack/ai": "workspace:*", + "@tanstack/ai-utils": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7" } diff --git a/packages/typescript/ai-gemini/src/utils/client.ts b/packages/typescript/ai-gemini/src/utils/client.ts index bb92293d7..f42a3fce0 100644 --- a/packages/typescript/ai-gemini/src/utils/client.ts +++ b/packages/typescript/ai-gemini/src/utils/client.ts @@ -1,4 +1,5 @@ import { GoogleGenAI } from '@google/genai' +import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' import type { GoogleGenAIOptions } from '@google/genai' export interface GeminiClientConfig extends GoogleGenAIOptions { @@ -20,26 +21,16 @@ export function createGeminiClient(config: GeminiClientConfig): GoogleGenAI { * @throws Error if GOOGLE_API_KEY or GEMINI_API_KEY is not found */ export function getGeminiApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? 
process.env - : undefined - const key = env?.GOOGLE_API_KEY || env?.GEMINI_API_KEY - - if (!key) { - throw new Error( - 'GOOGLE_API_KEY or GEMINI_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', - ) + try { + return getApiKeyFromEnv('GOOGLE_API_KEY') + } catch { + return getApiKeyFromEnv('GEMINI_API_KEY') } - - return key } /** * Generates a unique ID with a prefix */ export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return _generateId(prefix) } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index db8514d16..e1f11caba 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -450,7 +450,7 @@ importers: version: 1.141.1(@tanstack/router-core@1.159.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10) '@tanstack/solid-router-ssr-query': specifier: ^1.139.10 - version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.4(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3) + version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3) '@tanstack/solid-start': specifier: ^1.139.10 version: 1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(crossws@0.4.4(srvx@0.11.2))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) @@ -689,7 +689,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 
4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -699,13 +699,16 @@ importers: '@anthropic-ai/sdk': specifier: ^0.71.2 version: 0.71.2(zod@4.2.1) + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils devDependencies: '@tanstack/ai': specifier: workspace:* version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -721,7 +724,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -752,7 +755,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 
4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -774,6 +777,9 @@ importers: '@11labs/client': specifier: ^0.2.0 version: 0.2.0(@types/dom-mediacapture-record@1.0.22) + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils devDependencies: '@tanstack/ai': specifier: workspace:* @@ -783,7 +789,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-event-client: dependencies: @@ -796,20 +802,23 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-fal: dependencies: '@fal-ai/client': - specifier: ^1.9.4 - version: 1.9.4 + specifier: ^1.9.1 + version: 1.9.1 + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils devDependencies: '@tanstack/ai': specifier: workspace:* version: link:../ai '@vitest/coverage-v8': specifier: 
4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -819,13 +828,16 @@ importers: '@google/genai': specifier: ^1.43.0 version: 1.43.0 + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils devDependencies: '@tanstack/ai': specifier: workspace:* version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -850,7 +862,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -875,7 +887,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 
4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -894,7 +906,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -919,7 +931,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -941,7 +953,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + 
version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -960,7 +972,7 @@ importers: version: 3.2.4(preact@10.28.2) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -988,7 +1000,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -1028,7 +1040,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1099,7 +1111,7 @@ importers: version: link:../ai-solid '@vitest/coverage-v8': specifier: 4.0.14 - version: 
4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -1127,7 +1139,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -1213,7 +1225,7 @@ importers: version: 6.0.3(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(vue@3.5.25(typescript@5.9.3)) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1260,7 +1272,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 
4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1279,7 +1291,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1419,7 +1431,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -2490,10 +2502,6 @@ packages: resolution: {integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/config-array@0.21.2': - resolution: {integrity: sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/config-helpers@0.4.2': resolution: {integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -2506,16 +2514,12 @@ packages: resolution: {integrity: 
sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/eslintrc@3.3.5': - resolution: {integrity: sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/js@9.39.1': resolution: {integrity: sha512-S26Stp4zCy88tH94QbBv3XCuzRQiZ9yXofEILmglYTh/Ug/a9/umqvgFtYBAo3Lp0nsI/5/qH1CCrbdK3AP1Tw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/js@9.39.4': - resolution: {integrity: sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw==} + '@eslint/js@9.39.2': + resolution: {integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@eslint/object-schema@2.1.7': @@ -2530,8 +2534,8 @@ packages: resolution: {integrity: sha512-C3mrr3b5dRVlKPJdfrAXS8+dq+rq8Qm5SNRazca0JKgw1HQERFmrVb0towvMmw5uu8hHKNiQasMaR/tydf3Zsg==} engines: {node: ^20.19.0 || ^22.13.0 || ^23.5.0 || >=24.0.0, npm: '>=10'} - '@fal-ai/client@1.9.4': - resolution: {integrity: sha512-mDjF2QDq+oficSSxzmErNkseQeRXnvUBEhJy39n4PPe7jRPZeSqM2SNb27SW50rDtCtay+stwFU8zRZehlt1Qg==} + '@fal-ai/client@1.9.1': + resolution: {integrity: sha512-Z6+n9/2sKlDam1wWDfRWmMkLS09e2WhTU9w+2eWL7PuoGmHK43IhwKirk4b3kPC/QPp1g+ymfvhrSiS1SpDr6g==} engines: {node: '>=18.0.0'} '@gerrit0/mini-shiki@3.19.0': @@ -4853,9 +4857,6 @@ packages: '@vitest/expect@4.0.18': resolution: {integrity: sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==} - '@vitest/expect@4.1.0': - resolution: {integrity: sha512-EIxG7k4wlWweuCLG9Y5InKFwpMEOyrMb6ZJ1ihYu02LVj/bzUwn2VMU+13PinsjRW75XnITeFrQBMH5+dLvCDA==} - '@vitest/mocker@4.0.15': resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} peerDependencies: @@ 
-4878,17 +4879,6 @@ packages: vite: optional: true - '@vitest/mocker@4.1.0': - resolution: {integrity: sha512-evxREh+Hork43+Y4IOhTo+h5lGmVRyjqI739Rz4RlUPqwrkFFDF6EMvOOYjTx4E8Tl6gyCLRL8Mu7Ry12a13Tw==} - peerDependencies: - msw: ^2.4.9 - vite: ^6.0.0 || ^7.0.0 || ^8.0.0-0 - peerDependenciesMeta: - msw: - optional: true - vite: - optional: true - '@vitest/pretty-format@4.0.14': resolution: {integrity: sha512-SOYPgujB6TITcJxgd3wmsLl+wZv+fy3av2PpiPpsWPZ6J1ySUYfScfpIt2Yv56ShJXR2MOA6q2KjKHN4EpdyRQ==} @@ -4898,36 +4888,24 @@ packages: '@vitest/pretty-format@4.0.18': resolution: {integrity: sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==} - '@vitest/pretty-format@4.1.0': - resolution: {integrity: sha512-3RZLZlh88Ib0J7NQTRATfc/3ZPOnSUn2uDBUoGNn5T36+bALixmzphN26OUD3LRXWkJu4H0s5vvUeqBiw+kS0A==} - '@vitest/runner@4.0.15': resolution: {integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} '@vitest/runner@4.0.18': resolution: {integrity: sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==} - '@vitest/runner@4.1.0': - resolution: {integrity: sha512-Duvx2OzQ7d6OjchL+trw+aSrb9idh7pnNfxrklo14p3zmNL4qPCDeIJAK+eBKYjkIwG96Bc6vYuxhqDXQOWpoQ==} - '@vitest/snapshot@4.0.15': resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} '@vitest/snapshot@4.0.18': resolution: {integrity: sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==} - '@vitest/snapshot@4.1.0': - resolution: {integrity: sha512-0Vy9euT1kgsnj1CHttwi9i9o+4rRLEaPRSOJ5gyv579GJkNpgJK+B4HSv/rAWixx2wdAFci1X4CEPjiu2bXIMg==} - '@vitest/spy@4.0.15': resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} '@vitest/spy@4.0.18': resolution: {integrity: 
sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==} - '@vitest/spy@4.1.0': - resolution: {integrity: sha512-pz77k+PgNpyMDv2FV6qmk5ZVau6c3R8HC8v342T2xlFxQKTrSeYw9waIJG8KgV9fFwAtTu4ceRzMivPTH6wSxw==} - '@vitest/utils@4.0.14': resolution: {integrity: sha512-hLqXZKAWNg8pI+SQXyXxWCTOpA3MvsqcbVeNgSi8x/CSN2wi26dSzn1wrOhmCmFjEvN9p8/kLFRHa6PI8jHazw==} @@ -4937,9 +4915,6 @@ packages: '@vitest/utils@4.0.18': resolution: {integrity: sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==} - '@vitest/utils@4.1.0': - resolution: {integrity: sha512-XfPXT6a8TZY3dcGY8EdwsBulFCIw+BeeX0RZn2x/BtiY/75YGh8FeWGG8QISN/WhaqSrE2OrlDgtF8q5uhOTmw==} - '@volar/language-core@2.4.15': resolution: {integrity: sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==} @@ -5077,9 +5052,6 @@ packages: ajv@6.12.6: resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} - ajv@6.14.0: - resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==} - ajv@8.12.0: resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} @@ -5890,9 +5862,6 @@ packages: es-module-lexer@1.7.0: resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} - es-module-lexer@2.0.0: - resolution: {integrity: sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==} - es-object-atoms@1.1.1: resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} engines: {node: '>= 0.4'} @@ -6024,8 +5993,8 @@ packages: jiti: optional: true - eslint@9.39.4: - resolution: {integrity: sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==} + 
eslint@9.39.2: + resolution: {integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} hasBin: true peerDependencies: @@ -7409,9 +7378,6 @@ packages: minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} - minimatch@3.1.5: - resolution: {integrity: sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==} - minimatch@5.1.6: resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} engines: {node: '>=10'} @@ -8501,9 +8467,6 @@ packages: std-env@3.10.0: resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} - std-env@4.0.0: - resolution: {integrity: sha512-zUMPtQ/HBY3/50VbpkupYHbRroTRZJPRLvreamgErJVys0ceuzMkD44J/QjqhHjOzK42GQ3QZIeFG1OYfOtKqQ==} - stop-iteration-iterator@1.1.0: resolution: {integrity: sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==} engines: {node: '>= 0.4'} @@ -8672,10 +8635,6 @@ packages: resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} engines: {node: '>=18'} - tinyexec@1.0.4: - resolution: {integrity: sha512-u9r3uZC0bdpGOXtlxUIdwf9pkmvhqJdrVCH9fapQtgy/OeTTMZ1nqH7agtvEfmGui6e1XxjcdrlxvxJvc3sMqw==} - engines: {node: '>=18'} - tinyglobby@0.2.15: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} @@ -8684,10 +8643,6 @@ packages: resolution: {integrity: sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} engines: {node: '>=14.0.0'} - tinyrainbow@3.1.0: - resolution: {integrity: 
sha512-Bf+ILmBgretUrdJxzXM0SgXLZ3XfiaUuOj/IKQHuTXip+05Xn+uyEYdVg0kYDipTBcLrCVyUzAPz7QmArb0mmw==} - engines: {node: '>=14.0.0'} - tldts-core@7.0.19: resolution: {integrity: sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==} @@ -9419,41 +9374,6 @@ packages: jsdom: optional: true - vitest@4.1.0: - resolution: {integrity: sha512-YbDrMF9jM2Lqc++2530UourxZHmkKLxrs4+mYhEwqWS97WJ7wOYEkcr+QfRgJ3PW9wz3odRijLZjHEaRLTNbqw==} - engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} - hasBin: true - peerDependencies: - '@edge-runtime/vm': '*' - '@opentelemetry/api': ^1.9.0 - '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.1.0 - '@vitest/browser-preview': 4.1.0 - '@vitest/browser-webdriverio': 4.1.0 - '@vitest/ui': 4.1.0 - happy-dom: '*' - jsdom: '*' - vite: ^6.0.0 || ^7.0.0 || ^8.0.0-0 - peerDependenciesMeta: - '@edge-runtime/vm': - optional: true - '@opentelemetry/api': - optional: true - '@types/node': - optional: true - '@vitest/browser-playwright': - optional: true - '@vitest/browser-preview': - optional: true - '@vitest/browser-webdriverio': - optional: true - '@vitest/ui': - optional: true - happy-dom: - optional: true - jsdom: - optional: true - vscode-uri@3.1.0: resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} @@ -10455,14 +10375,14 @@ snapshots: eslint: 9.39.1(jiti@2.6.1) eslint-visitor-keys: 3.4.3 - '@eslint-community/eslint-utils@4.9.0(eslint@9.39.4(jiti@2.6.1))': + '@eslint-community/eslint-utils@4.9.0(eslint@9.39.2(jiti@2.6.1))': dependencies: - eslint: 9.39.4(jiti@2.6.1) + eslint: 9.39.2(jiti@2.6.1) eslint-visitor-keys: 3.4.3 - '@eslint-community/eslint-utils@4.9.1(eslint@9.39.4(jiti@2.6.1))': + '@eslint-community/eslint-utils@4.9.1(eslint@9.39.2(jiti@2.6.1))': dependencies: - eslint: 9.39.4(jiti@2.6.1) + eslint: 9.39.2(jiti@2.6.1) eslint-visitor-keys: 3.4.3 '@eslint-community/regexpp@4.12.2': {} @@ -10475,14 +10395,6 @@ 
snapshots: transitivePeerDependencies: - supports-color - '@eslint/config-array@0.21.2': - dependencies: - '@eslint/object-schema': 2.1.7 - debug: 4.4.3 - minimatch: 3.1.5 - transitivePeerDependencies: - - supports-color - '@eslint/config-helpers@0.4.2': dependencies: '@eslint/core': 0.17.0 @@ -10505,23 +10417,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@eslint/eslintrc@3.3.5': - dependencies: - ajv: 6.14.0 - debug: 4.4.3 - espree: 10.4.0 - globals: 14.0.0 - ignore: 5.3.2 - import-fresh: 3.3.1 - js-yaml: 4.1.1 - minimatch: 3.1.5 - strip-json-comments: 3.1.1 - transitivePeerDependencies: - - supports-color - '@eslint/js@9.39.1': {} - '@eslint/js@9.39.4': {} + '@eslint/js@9.39.2': {} '@eslint/object-schema@2.1.7': {} @@ -10532,7 +10430,7 @@ snapshots: '@faker-js/faker@10.1.0': {} - '@fal-ai/client@1.9.4': + '@fal-ai/client@1.9.1': dependencies: '@msgpack/msgpack': 3.1.3 eventsource-parser: 1.1.2 @@ -12527,13 +12425,13 @@ snapshots: transitivePeerDependencies: - csstype - '@tanstack/solid-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.4(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3)': + '@tanstack/solid-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3)': dependencies: '@tanstack/query-core': 5.90.12 '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4) '@tanstack/solid-query': 5.90.15(solid-js@1.9.10) '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) - eslint-plugin-solid: 0.14.5(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-solid: 0.14.5(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) solid-js: 1.9.10 transitivePeerDependencies: - 
'@tanstack/router-core' @@ -13333,13 +13231,13 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.49.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/utils@8.49.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-community/eslint-utils': 4.9.0(eslint@9.39.4(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.0(eslint@9.39.2(jiti@2.6.1)) '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/types': 8.49.0 '@typescript-eslint/typescript-estree': 8.49.0(typescript@5.9.3) - eslint: 9.39.4(jiti@2.6.1) + eslint: 9.39.2(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -13533,7 +13431,7 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.1.0(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -13546,11 +13444,11 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.1.0(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vitest: 4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))': + 
'@vitest/coverage-v8@4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -13563,24 +13461,7 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - transitivePeerDependencies: - - supports-color - - '@vitest/coverage-v8@4.0.14(vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))': - dependencies: - '@bcoe/v8-coverage': 1.0.2 - '@vitest/utils': 4.0.14 - ast-v8-to-istanbul: 0.3.11 - istanbul-lib-coverage: 3.2.2 - istanbul-lib-report: 3.0.1 - istanbul-lib-source-maps: 5.0.6 - istanbul-reports: 3.2.0 - magicast: 0.5.2 - obug: 2.1.1 - std-env: 3.10.0 - tinyrainbow: 3.0.3 - vitest: 4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vitest: 4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color @@ -13602,15 +13483,6 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.0.3 - '@vitest/expect@4.1.0': - dependencies: - '@standard-schema/spec': 1.1.0 - '@types/chai': 5.2.3 - '@vitest/spy': 4.1.0 - '@vitest/utils': 4.1.0 - chai: 6.2.2 - tinyrainbow: 3.1.0 - '@vitest/mocker@4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.15 @@ -13627,25 +13499,9 @@ snapshots: optionalDependencies: vite: 
7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/mocker@4.1.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': - dependencies: - '@vitest/spy': 4.1.0 - estree-walker: 3.0.3 - magic-string: 0.30.21 - optionalDependencies: - vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - - '@vitest/mocker@4.1.0(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': - dependencies: - '@vitest/spy': 4.1.0 - estree-walker: 3.0.3 - magic-string: 0.30.21 - optionalDependencies: - vite: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - - '@vitest/mocker@4.1.0(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@vitest/spy': 4.1.0 + '@vitest/spy': 4.0.18 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: @@ -13663,10 +13519,6 @@ snapshots: dependencies: tinyrainbow: 3.0.3 - '@vitest/pretty-format@4.1.0': - dependencies: - tinyrainbow: 3.1.0 - '@vitest/runner@4.0.15': dependencies: '@vitest/utils': 4.0.15 @@ -13677,11 +13529,6 @@ snapshots: '@vitest/utils': 4.0.18 pathe: 2.0.3 - '@vitest/runner@4.1.0': - dependencies: - '@vitest/utils': 4.1.0 - pathe: 2.0.3 - '@vitest/snapshot@4.0.15': dependencies: '@vitest/pretty-format': 4.0.15 @@ -13694,19 +13541,10 @@ snapshots: magic-string: 0.30.21 pathe: 2.0.3 - '@vitest/snapshot@4.1.0': - dependencies: - '@vitest/pretty-format': 4.1.0 - '@vitest/utils': 4.1.0 - magic-string: 0.30.21 - pathe: 2.0.3 - '@vitest/spy@4.0.15': {} '@vitest/spy@4.0.18': {} - '@vitest/spy@4.1.0': {} - '@vitest/utils@4.0.14': dependencies: '@vitest/pretty-format': 4.0.14 @@ -13722,12 
+13560,6 @@ snapshots: '@vitest/pretty-format': 4.0.18 tinyrainbow: 3.0.3 - '@vitest/utils@4.1.0': - dependencies: - '@vitest/pretty-format': 4.1.0 - convert-source-map: 2.0.0 - tinyrainbow: 3.1.0 - '@volar/language-core@2.4.15': dependencies: '@volar/source-map': 2.4.15 @@ -13897,13 +13729,6 @@ snapshots: json-schema-traverse: 0.4.1 uri-js: 4.4.1 - ajv@6.14.0: - dependencies: - fast-deep-equal: 3.1.3 - fast-json-stable-stringify: 2.1.0 - json-schema-traverse: 0.4.1 - uri-js: 4.4.1 - ajv@8.12.0: dependencies: fast-deep-equal: 3.1.3 @@ -14702,8 +14527,6 @@ snapshots: es-module-lexer@1.7.0: {} - es-module-lexer@2.0.0: {} - es-object-atoms@1.1.1: dependencies: es-errors: 1.3.0 @@ -14899,10 +14722,10 @@ snapshots: transitivePeerDependencies: - typescript - eslint-plugin-solid@0.14.5(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-solid@0.14.5(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@typescript-eslint/utils': 8.49.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.39.4(jiti@2.6.1) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.2(jiti@2.6.1) estraverse: 5.3.0 is-html: 2.0.0 kebab-case: 1.0.2 @@ -14968,21 +14791,21 @@ snapshots: transitivePeerDependencies: - supports-color - eslint@9.39.4(jiti@2.6.1): + eslint@9.39.2(jiti@2.6.1): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.4(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.2 - '@eslint/config-array': 0.21.2 + '@eslint/config-array': 0.21.1 '@eslint/config-helpers': 0.4.2 '@eslint/core': 0.17.0 - '@eslint/eslintrc': 3.3.5 - '@eslint/js': 9.39.4 + '@eslint/eslintrc': 3.3.3 + '@eslint/js': 9.39.2 '@eslint/plugin-kit': 0.4.1 '@humanfs/node': 0.16.7 '@humanwhocodes/module-importer': 1.0.1 '@humanwhocodes/retry': 0.4.3 '@types/estree': 1.0.8 - ajv: 6.14.0 + ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 debug: 4.4.3 @@ -15001,7 +14824,7 @@ 
snapshots: is-glob: 4.0.3 json-stable-stringify-without-jsonify: 1.0.1 lodash.merge: 4.6.2 - minimatch: 3.1.5 + minimatch: 3.1.2 natural-compare: 1.4.0 optionator: 0.9.4 optionalDependencies: @@ -16706,10 +16529,6 @@ snapshots: dependencies: brace-expansion: 1.1.12 - minimatch@3.1.5: - dependencies: - brace-expansion: 1.1.12 - minimatch@5.1.6: dependencies: brace-expansion: 2.0.2 @@ -18171,8 +17990,6 @@ snapshots: std-env@3.10.0: {} - std-env@4.0.0: {} - stop-iteration-iterator@1.1.0: dependencies: es-errors: 1.3.0 @@ -18373,8 +18190,6 @@ snapshots: tinyexec@1.0.2: {} - tinyexec@1.0.4: {} - tinyglobby@0.2.15: dependencies: fdir: 6.5.0(picomatch@4.0.3) @@ -18382,8 +18197,6 @@ snapshots: tinyrainbow@3.0.3: {} - tinyrainbow@3.1.0: {} - tldts-core@7.0.19: {} tldts@7.0.19: @@ -19200,84 +19013,26 @@ snapshots: - tsx - yaml - vitest@4.1.0(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): - dependencies: - '@vitest/expect': 4.1.0 - '@vitest/mocker': 4.1.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.1.0 - '@vitest/runner': 4.1.0 - '@vitest/snapshot': 4.1.0 - '@vitest/spy': 4.1.0 - '@vitest/utils': 4.1.0 - es-module-lexer: 2.0.0 - expect-type: 1.3.0 - magic-string: 0.30.21 - obug: 2.1.1 - pathe: 2.0.3 - picomatch: 4.0.3 - std-env: 4.0.0 - tinybench: 2.9.0 - tinyexec: 1.0.4 - tinyglobby: 0.2.15 - tinyrainbow: 3.1.0 - vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - why-is-node-running: 2.3.0 - optionalDependencies: - '@types/node': 24.10.3 - happy-dom: 20.0.11 - jsdom: 27.3.0(postcss@8.5.6) - transitivePeerDependencies: - - msw - - 
vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): + vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.1.0 - '@vitest/mocker': 4.1.0(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.1.0 - '@vitest/runner': 4.1.0 - '@vitest/snapshot': 4.1.0 - '@vitest/spy': 4.1.0 - '@vitest/utils': 4.1.0 - es-module-lexer: 2.0.0 - expect-type: 1.3.0 - magic-string: 0.30.21 - obug: 2.1.1 - pathe: 2.0.3 - picomatch: 4.0.3 - std-env: 4.0.0 - tinybench: 2.9.0 - tinyexec: 1.0.4 - tinyglobby: 0.2.15 - tinyrainbow: 3.1.0 - vite: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - why-is-node-running: 2.3.0 - optionalDependencies: - '@types/node': 25.0.1 - happy-dom: 20.0.11 - jsdom: 27.3.0(postcss@8.5.6) - transitivePeerDependencies: - - msw - - vitest@4.1.0(@types/node@25.0.1)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.6))(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): - dependencies: - '@vitest/expect': 4.1.0 - '@vitest/mocker': 4.1.0(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.1.0 - '@vitest/runner': 4.1.0 - '@vitest/snapshot': 4.1.0 - '@vitest/spy': 4.1.0 - '@vitest/utils': 4.1.0 - es-module-lexer: 2.0.0 + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + es-module-lexer: 1.7.0 expect-type: 1.3.0 
magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 picomatch: 4.0.3 - std-env: 4.0.0 + std-env: 3.10.0 tinybench: 2.9.0 - tinyexec: 1.0.4 + tinyexec: 1.0.2 tinyglobby: 0.2.15 - tinyrainbow: 3.1.0 + tinyrainbow: 3.0.3 vite: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: @@ -19285,7 +19040,17 @@ snapshots: happy-dom: 20.0.11 jsdom: 27.3.0(postcss@8.5.6) transitivePeerDependencies: + - jiti + - less + - lightningcss - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml vscode-uri@3.1.0: {} From d345523f31e4f63059373aa72362f71f722153d5 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 14:48:25 +0200 Subject: [PATCH 12/15] chore: add changesets for openai-base extraction --- .changeset/add-ai-utils-package.md | 5 +++++ .changeset/add-openai-base-package.md | 5 +++++ .changeset/refactor-providers-to-shared-packages.md | 13 +++++++++++++ 3 files changed, 23 insertions(+) create mode 100644 .changeset/add-ai-utils-package.md create mode 100644 .changeset/add-openai-base-package.md create mode 100644 .changeset/refactor-providers-to-shared-packages.md diff --git a/.changeset/add-ai-utils-package.md b/.changeset/add-ai-utils-package.md new file mode 100644 index 000000000..c5cdcddee --- /dev/null +++ b/.changeset/add-ai-utils-package.md @@ -0,0 +1,5 @@ +--- +'@tanstack/ai-utils': minor +--- + +New package: shared provider-agnostic utilities for TanStack AI adapters. Includes `generateId`, `getApiKeyFromEnv`, `transformNullsToUndefined`, and `ModelMeta` types with `defineModelMeta` validation helper. Zero runtime dependencies. 
diff --git a/.changeset/add-openai-base-package.md b/.changeset/add-openai-base-package.md new file mode 100644 index 000000000..b549fe322 --- /dev/null +++ b/.changeset/add-openai-base-package.md @@ -0,0 +1,5 @@ +--- +'@tanstack/openai-base': minor +--- + +New package: shared base adapters and utilities for OpenAI-compatible providers. Includes Chat Completions and Responses API text adapter base classes, image/summarize/transcription/TTS/video adapter base classes, schema converter, 15 tool converters, and shared types. Providers extend these base classes to reduce duplication and ensure consistent behavior. diff --git a/.changeset/refactor-providers-to-shared-packages.md b/.changeset/refactor-providers-to-shared-packages.md new file mode 100644 index 000000000..05c0a8825 --- /dev/null +++ b/.changeset/refactor-providers-to-shared-packages.md @@ -0,0 +1,13 @@ +--- +'@tanstack/ai-openai': patch +'@tanstack/ai-grok': patch +'@tanstack/ai-groq': patch +'@tanstack/ai-openrouter': patch +'@tanstack/ai-ollama': patch +'@tanstack/ai-anthropic': patch +'@tanstack/ai-gemini': patch +'@tanstack/ai-fal': patch +'@tanstack/ai-elevenlabs': patch +--- + +Internal refactor: delegate shared utilities to `@tanstack/ai-utils` and OpenAI-compatible adapter logic to `@tanstack/openai-base`. No breaking changes — all public APIs remain identical. 
From 914b8836a83b4d25c118ec96c34e4738e894a784 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 18:33:11 +0200 Subject: [PATCH 13/15] fix: address CodeRabbit review comments on openai-base extraction - Fix schema-converter default required parameter and null-widening for nested types - Fix removeEmptyRequired to recurse into anyOf/oneOf/allOf/additionalProperties (groq) - Forward modelOptions, request headers/signal in chat-completions-text adapter - Remove stream_options leak into non-streaming structured output calls - Use call_id instead of internal id for tool call correlation (responses-text) - Make transcription verbose_json default provider-agnostic via protected override - Add runtime guards for ArrayBuffer and atob in transcription adapter - Derive TTS outputFormat from merged request after modelOptions spread - Add downloadContent probe and fix expires_at seconds-to-milliseconds (video) - Fix mcp-tool type ordering so metadata cannot override type: 'mcp' - Add tests proving all fixes work --- .../ai-groq/src/utils/schema-converter.ts | 22 +++ .../ai-groq/tests/schema-converter.test.ts | 100 +++++++++++ .../ai-openai/src/adapters/transcription.ts | 4 + .../src/adapters/chat-completions-text.ts | 57 ++++-- .../src/adapters/responses-text.ts | 14 +- .../openai-base/src/adapters/transcription.ts | 22 ++- .../openai-base/src/adapters/tts.ts | 2 +- .../openai-base/src/adapters/video.ts | 19 +- .../openai-base/src/tools/mcp-tool.ts | 2 +- .../openai-base/src/utils/schema-converter.ts | 45 ++--- .../tests/chat-completions-text.test.ts | 163 +++++++++++++++++- .../openai-base/tests/mcp-tool.test.ts | 34 ++++ .../openai-base/tests/responses-text.test.ts | 93 ++++++++++ .../tests/schema-converter.test.ts | 87 ++++++++++ pnpm-lock.yaml | 10 +- 15 files changed, 619 insertions(+), 55 deletions(-) create mode 100644 packages/typescript/ai-groq/tests/schema-converter.test.ts create mode 100644 packages/typescript/openai-base/tests/mcp-tool.test.ts diff 
--git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts index 366d231d5..3178c9141 100644 --- a/packages/typescript/ai-groq/src/utils/schema-converter.ts +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -36,6 +36,28 @@ function removeEmptyRequired(schema: Record): Record { result.items = removeEmptyRequired(result.items) } + // Recurse into combinator arrays (anyOf, oneOf, allOf) + for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) { + if (Array.isArray(result[keyword])) { + result[keyword] = result[keyword].map((entry: Record) => + typeof entry === 'object' && entry !== null + ? removeEmptyRequired(entry) + : entry, + ) + } + } + + // Recurse into additionalProperties if it's a schema object + if ( + result.additionalProperties && + typeof result.additionalProperties === 'object' && + !Array.isArray(result.additionalProperties) + ) { + result.additionalProperties = removeEmptyRequired( + result.additionalProperties, + ) + } + return result } diff --git a/packages/typescript/ai-groq/tests/schema-converter.test.ts b/packages/typescript/ai-groq/tests/schema-converter.test.ts new file mode 100644 index 000000000..1e13ecd15 --- /dev/null +++ b/packages/typescript/ai-groq/tests/schema-converter.test.ts @@ -0,0 +1,100 @@ +import { describe, expect, it } from 'vitest' +import { makeGroqStructuredOutputCompatible } from '../src/utils/schema-converter' + +describe('makeGroqStructuredOutputCompatible', () => { + it('should remove empty required arrays inside anyOf variants', () => { + const schema = { + type: 'object', + properties: { + value: { + anyOf: [ + { + type: 'object', + properties: {}, + required: [], + }, + { type: 'null' }, + ], + }, + }, + required: ['value'], + } + + const result = makeGroqStructuredOutputCompatible(schema, ['value']) + + // Empty required inside anyOf variant should be removed + const objectVariant = result.properties.value.anyOf.find( + (v: any) 
=> v.type === 'object', + ) + expect(objectVariant.required).toBeUndefined() + }) + + it('should remove empty required arrays inside oneOf variants', () => { + const schema = { + type: 'object', + properties: { + data: { + type: 'object', + properties: { + inner: { type: 'string' }, + }, + required: ['inner'], + }, + }, + required: ['data'], + } + + // First create a schema that would produce empty required after processing + const result = makeGroqStructuredOutputCompatible(schema, ['data']) + + // Should not have empty required arrays anywhere + const checkNoEmptyRequired = (obj: any): void => { + if (obj && typeof obj === 'object') { + if (Array.isArray(obj.required)) { + expect(obj.required.length).toBeGreaterThan(0) + } + for (const value of Object.values(obj)) { + if (typeof value === 'object' && value !== null) { + checkNoEmptyRequired(value) + } + } + } + } + checkNoEmptyRequired(result) + }) + + it('should remove empty required in additionalProperties', () => { + const schema = { + type: 'object', + properties: { + meta: { + type: 'object', + properties: { + name: { type: 'string' }, + }, + required: ['name'], + additionalProperties: { + type: 'object', + properties: {}, + required: [], + }, + }, + }, + required: ['meta'], + } + + const result = makeGroqStructuredOutputCompatible(schema, ['meta']) + + // meta should have required with allPropertyNames + expect(result.properties.meta.required).toEqual(['name']) + // additionalProperties' empty required should be removed + if ( + result.properties.meta.additionalProperties && + typeof result.properties.meta.additionalProperties === 'object' + ) { + expect( + result.properties.meta.additionalProperties.required, + ).toBeUndefined() + } + }) +}) diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index 65b885ae2..7007b8351 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ 
b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ -33,6 +33,10 @@ export class OpenAITranscriptionAdapter< constructor(config: OpenAITranscriptionConfig, model: TModel) { super(toCompatibleConfig(config), model, 'openai') } + + protected override shouldDefaultToVerbose(model: string): boolean { + return model !== 'whisper-1' + } } /** diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index 3acbab0ad..3f7a63e0e 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -73,10 +73,19 @@ export class OpenAICompatibleChatCompletionsTextAdapter< } try { - const stream = await this.client.chat.completions.create({ - ...requestParams, - stream: true, - }) + const stream = await this.client.chat.completions.create( + { + ...requestParams, + stream: true, + stream_options: { include_usage: true }, + }, + { + headers: (options.request as RequestInit | undefined)?.headers as + | Record + | undefined, + signal: (options.request as RequestInit | undefined)?.signal, + }, + ) yield* this.processStreamChunks(stream, options, aguiState) } catch (error: unknown) { @@ -138,18 +147,32 @@ export class OpenAICompatibleChatCompletionsTextAdapter< ) try { - const response = await this.client.chat.completions.create({ - ...requestParams, - stream: false, - response_format: { - type: 'json_schema', - json_schema: { - name: 'structured_output', - schema: jsonSchema, - strict: true, + // Strip stream_options which is only valid for streaming calls + const { + stream_options: _, + stream: __, + ...cleanParams + } = requestParams as any + const response = await this.client.chat.completions.create( + { + ...cleanParams, + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, }, }, - 
}) + { + headers: (chatOptions.request as RequestInit | undefined)?.headers as + | Record + | undefined, + signal: (chatOptions.request as RequestInit | undefined)?.signal, + }, + ) // Extract text content from the response const rawText = response.choices[0]?.message.content || '' @@ -436,15 +459,19 @@ export class OpenAICompatibleChatCompletionsTextAdapter< messages.push(this.convertMessage(message)) } + const modelOptions = options.modelOptions as + | Record + | undefined + return { model: options.model, messages, temperature: options.temperature, max_tokens: options.maxTokens, top_p: options.topP, + ...modelOptions, tools: tools as Array, stream: true, - stream_options: { include_usage: true }, } } diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index 699ee0d38..4b94206b5 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -78,7 +78,7 @@ export class OpenAICompatibleResponsesTextAdapter< // We assign our own indices as we encounter unique tool call IDs. 
const toolCallMetadata = new Map< string, - { index: number; name: string; started: boolean } + { index: number; name: string; callId: string; started: boolean } >() const requestParams = this.mapOptionsToRequest(options) const timestamp = Date.now() @@ -274,7 +274,7 @@ export class OpenAICompatibleResponsesTextAdapter< stream: AsyncIterable, toolCallMetadata: Map< string, - { index: number; name: string; started: boolean } + { index: number; name: string; callId: string; started: boolean } >, options: TextOptions, aguiState: { @@ -557,18 +557,21 @@ export class OpenAICompatibleResponsesTextAdapter< if (chunk.type === 'response.output_item.added') { const item = chunk.item if (item.type === 'function_call' && item.id) { + // Use call_id for tool call correlation (required for function_call_output) + const callId = (item as any).call_id || item.id // Store the function name for later use if (!toolCallMetadata.has(item.id)) { toolCallMetadata.set(item.id, { index: chunk.output_index, name: item.name || '', + callId, started: false, }) } // Emit TOOL_CALL_START yield { type: 'TOOL_CALL_START', - toolCallId: item.id, + toolCallId: callId, toolName: item.name || '', model: model || options.model, timestamp, @@ -586,7 +589,7 @@ export class OpenAICompatibleResponsesTextAdapter< const metadata = toolCallMetadata.get(chunk.item_id) yield { type: 'TOOL_CALL_ARGS', - toolCallId: chunk.item_id, + toolCallId: metadata?.callId || chunk.item_id, model: model || options.model, timestamp, delta: chunk.delta, @@ -600,6 +603,7 @@ export class OpenAICompatibleResponsesTextAdapter< // Get the function name from metadata (captured in output_item.added) const metadata = toolCallMetadata.get(item_id) const name = metadata?.name || '' + const callId = metadata?.callId || item_id // Parse arguments let parsedInput: unknown = {} @@ -611,7 +615,7 @@ export class OpenAICompatibleResponsesTextAdapter< yield { type: 'TOOL_CALL_END', - toolCallId: item_id, + toolCallId: callId, toolName: name, 
model: model || options.model, timestamp, diff --git a/packages/typescript/openai-base/src/adapters/transcription.ts b/packages/typescript/openai-base/src/adapters/transcription.ts index 14346ecdf..02c4f08c0 100644 --- a/packages/typescript/openai-base/src/adapters/transcription.ts +++ b/packages/typescript/openai-base/src/adapters/transcription.ts @@ -61,7 +61,7 @@ export class OpenAICompatibleTranscriptionAdapter< // Call API - use verbose_json to get timestamps when available const useVerbose = responseFormat === 'verbose_json' || - (!responseFormat && model !== 'whisper-1') + (!responseFormat && this.shouldDefaultToVerbose(model)) if (useVerbose) { const response = await this.client.audio.transcriptions.create({ @@ -116,7 +116,7 @@ export class OpenAICompatibleTranscriptionAdapter< } // If ArrayBuffer, convert to File - if (audio instanceof ArrayBuffer) { + if (typeof ArrayBuffer !== 'undefined' && audio instanceof ArrayBuffer) { return new File([audio], 'audio.mp3', { type: 'audio/mpeg' }) } @@ -129,6 +129,11 @@ export class OpenAICompatibleTranscriptionAdapter< const base64Data = parts[1] || '' const mimeMatch = header?.match(/data:([^;]+)/) const mimeType = mimeMatch?.[1] || 'audio/mpeg' + if (typeof atob !== 'function') { + throw new Error( + 'atob is not available in this environment. Use a File, Blob, or ArrayBuffer input instead.', + ) + } const binaryStr = atob(base64Data) const bytes = new Uint8Array(binaryStr.length) for (let i = 0; i < binaryStr.length; i++) { @@ -139,6 +144,11 @@ export class OpenAICompatibleTranscriptionAdapter< } // Assume raw base64 + if (typeof atob !== 'function') { + throw new Error( + 'atob is not available in this environment. 
Use a File, Blob, or ArrayBuffer input instead.', + ) + } const binaryStr = atob(audio) const bytes = new Uint8Array(binaryStr.length) for (let i = 0; i < binaryStr.length; i++) { @@ -150,6 +160,14 @@ export class OpenAICompatibleTranscriptionAdapter< throw new Error('Invalid audio input type') } + /** + * Whether the adapter should default to verbose_json when no response format is specified. + * Override in provider-specific subclasses for model-specific behavior. + */ + protected shouldDefaultToVerbose(_model: string): boolean { + return false + } + protected mapResponseFormat( format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt', ): OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] { diff --git a/packages/typescript/openai-base/src/adapters/tts.ts b/packages/typescript/openai-base/src/adapters/tts.ts index 21a03b4f6..2dcdb3965 100644 --- a/packages/typescript/openai-base/src/adapters/tts.ts +++ b/packages/typescript/openai-base/src/adapters/tts.ts @@ -62,7 +62,7 @@ export class OpenAICompatibleTTSAdapter< const arrayBuffer = await response.arrayBuffer() const base64 = Buffer.from(arrayBuffer).toString('base64') - const outputFormat = format || 'mp3' + const outputFormat = (request.response_format as string) || 'mp3' const contentType = this.getContentType(outputFormat) return { diff --git a/packages/typescript/openai-base/src/adapters/video.ts b/packages/typescript/openai-base/src/adapters/video.ts index edf688fe3..fe38f196f 100644 --- a/packages/typescript/openai-base/src/adapters/video.ts +++ b/packages/typescript/openai-base/src/adapters/video.ts @@ -128,7 +128,20 @@ export class OpenAICompatibleVideoAdapter< let response: any - if (typeof client.videos?.content === 'function') { + if (typeof client.videos?.downloadContent === 'function') { + // OpenAI SDK's downloadContent returns raw video bytes as a Response + const contentResponse = await client.videos.downloadContent(jobId) + const videoBlob = await contentResponse.blob() + const buffer 
= await videoBlob.arrayBuffer() + const base64 = Buffer.from(buffer).toString('base64') + const mimeType = + contentResponse.headers.get('content-type') || 'video/mp4' + return { + jobId, + url: `data:${mimeType};base64,${base64}`, + expiresAt: undefined, + } + } else if (typeof client.videos?.content === 'function') { response = await client.videos.content(jobId) } else if (typeof client.videos?.getContent === 'function') { response = await client.videos.getContent(jobId) @@ -142,7 +155,7 @@ export class OpenAICompatibleVideoAdapter< jobId, url: videoInfo.url, expiresAt: videoInfo.expires_at - ? new Date(videoInfo.expires_at) + ? new Date(videoInfo.expires_at * 1000) : undefined, } } @@ -192,7 +205,7 @@ export class OpenAICompatibleVideoAdapter< jobId, url: response.url, expiresAt: response.expires_at - ? new Date(response.expires_at) + ? new Date(response.expires_at * 1000) : undefined, } } catch (error: any) { diff --git a/packages/typescript/openai-base/src/tools/mcp-tool.ts b/packages/typescript/openai-base/src/tools/mcp-tool.ts index 64b94357f..aad0943d3 100644 --- a/packages/typescript/openai-base/src/tools/mcp-tool.ts +++ b/packages/typescript/openai-base/src/tools/mcp-tool.ts @@ -19,8 +19,8 @@ export function convertMCPToolToAdapterFormat(tool: Tool): MCPTool { const metadata = tool.metadata as Omit const mcpTool: MCPTool = { - type: 'mcp', ...metadata, + type: 'mcp', } validateMCPtool(mcpTool) diff --git a/packages/typescript/openai-base/src/utils/schema-converter.ts b/packages/typescript/openai-base/src/utils/schema-converter.ts index 83bdd06f8..1ae39172c 100644 --- a/packages/typescript/openai-base/src/utils/schema-converter.ts +++ b/packages/typescript/openai-base/src/utils/schema-converter.ts @@ -11,25 +11,26 @@ */ export function makeStructuredOutputCompatible( schema: Record, - originalRequired: Array = [], + originalRequired?: Array, ): Record { const result = { ...schema } + const required = + originalRequired ?? 
+ (Array.isArray(result.required) ? result.required : []) if (result.type === 'object' && result.properties) { const properties = { ...result.properties } const allPropertyNames = Object.keys(properties) for (const propName of allPropertyNames) { - const prop = properties[propName] - const wasOptional = !originalRequired.includes(propName) + let prop = properties[propName] + const wasOptional = !required.includes(propName) + // Step 1: Recurse into nested structures if (prop.type === 'object' && prop.properties) { - properties[propName] = makeStructuredOutputCompatible( - prop, - prop.required || [], - ) + prop = makeStructuredOutputCompatible(prop, prop.required || []) } else if (prop.type === 'array' && prop.items) { - properties[propName] = { + prop = { ...prop, items: makeStructuredOutputCompatible( prop.items, @@ -37,28 +38,28 @@ export function makeStructuredOutputCompatible( ), } } else if (prop.anyOf) { - properties[propName] = makeStructuredOutputCompatible( - prop, - prop.required || [], - ) + prop = makeStructuredOutputCompatible(prop, prop.required || []) } else if (prop.oneOf) { throw new Error( 'oneOf is not supported in OpenAI structured output schemas. 
Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', ) - } else if (wasOptional) { - // Optional fields must be nullable because OpenAI requires all properties in `required` - if (prop.type && !Array.isArray(prop.type)) { - properties[propName] = { - ...prop, - type: [prop.type, 'null'], + } + + // Step 2: Apply null-widening for optional properties (after recursion) + if (wasOptional) { + if (prop.anyOf) { + // For anyOf, add a null variant if not already present + if (!prop.anyOf.some((v: any) => v.type === 'null')) { + prop = { ...prop, anyOf: [...prop.anyOf, { type: 'null' }] } } + } else if (prop.type && !Array.isArray(prop.type)) { + prop = { ...prop, type: [prop.type, 'null'] } } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { - properties[propName] = { - ...prop, - type: [...prop.type, 'null'], - } + prop = { ...prop, type: [...prop.type, 'null'] } } } + + properties[propName] = prop } result.properties = properties diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts index 3674493cf..610024221 100644 --- a/packages/typescript/openai-base/tests/chat-completions-text.test.ts +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -640,7 +640,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { expect(result.data).toEqual({ name: 'Alice', age: 30 }) expect(result.rawText).toBe('{"name":"Alice","age":30}') - // Verify stream: false was passed + // Verify stream: false was passed (second arg is request options) expect(mockCreate).toHaveBeenCalledWith( expect.objectContaining({ stream: false, @@ -648,6 +648,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { type: 'json_schema', }), }), + expect.anything(), ) }) @@ -743,4 +744,164 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { expect(adapter.model).toBe('my-model') }) 
}) + + describe('request forwarding', () => { + it('forwards modelOptions to the API request', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { delta: { content: 'Hi' }, finish_reason: null }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [{ delta: {}, finish_reason: 'stop' }], + usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + modelOptions: { frequency_penalty: 0.5, presence_penalty: 0.3 }, + })) { + chunks.push(chunk) + } + + // Verify modelOptions were forwarded + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + frequency_penalty: 0.5, + presence_penalty: 0.3, + }), + expect.anything(), + ) + }) + + it('includes stream_options only for streaming calls', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { delta: { content: 'Hi' }, finish_reason: null }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [{ delta: {}, finish_reason: 'stop' }], + usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Streaming call should include stream_options + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + stream: true, + stream_options: { include_usage: true }, + }), + expect.anything(), + ) + }) + + it('does not include 
stream_options in structured output calls', async () => { + const nonStreamResponse = { + choices: [ + { message: { content: '{"name":"Alice"}' } }, + ], + } + + setupMockSdkClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + await adapter.structuredOutput({ + chatOptions: { + model: 'test-model', + messages: [{ role: 'user', content: 'Give me a person' }], + }, + outputSchema: { + type: 'object', + properties: { name: { type: 'string' } }, + required: ['name'], + }, + }) + + // Structured output call should NOT have stream_options + const callArgs = mockCreate.mock.calls[0]?.[0] + expect(callArgs.stream).toBe(false) + expect(callArgs.stream_options).toBeUndefined() + }) + + it('forwards request headers and signal to SDK create calls', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [ + { delta: { content: 'Hi' }, finish_reason: null }, + ], + }, + { + id: 'chatcmpl-123', + model: 'test-model', + choices: [{ delta: {}, finish_reason: 'stop' }], + usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + const controller = new AbortController() + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Hello' }], + request: { + headers: { 'X-Custom-Header': 'test-value' }, + signal: controller.signal, + }, + })) { + chunks.push(chunk) + } + + // Verify second argument contains headers and signal + const requestOptions = mockCreate.mock.calls[0]?.[1] + expect(requestOptions).toBeDefined() + expect(requestOptions.headers).toEqual({ 'X-Custom-Header': 'test-value' }) + expect(requestOptions.signal).toBe(controller.signal) + }) + }) }) diff --git a/packages/typescript/openai-base/tests/mcp-tool.test.ts 
b/packages/typescript/openai-base/tests/mcp-tool.test.ts new file mode 100644 index 000000000..d38e40128 --- /dev/null +++ b/packages/typescript/openai-base/tests/mcp-tool.test.ts @@ -0,0 +1,34 @@ +import { describe, expect, it } from 'vitest' +import { convertMCPToolToAdapterFormat } from '../src/tools/mcp-tool' +import type { Tool } from '@tanstack/ai' + +describe('convertMCPToolToAdapterFormat', () => { + it('should always set type to mcp even if metadata contains a type field', () => { + const tool: Tool = { + name: 'mcp', + description: 'test mcp tool', + metadata: { + type: 'not_mcp', + server_url: 'https://example.com/mcp', + }, + } + + const result = convertMCPToolToAdapterFormat(tool) + expect(result.type).toBe('mcp') + }) + + it('should preserve metadata fields other than type', () => { + const tool: Tool = { + name: 'mcp', + description: 'test mcp tool', + metadata: { + server_url: 'https://example.com/mcp', + server_description: 'Test server', + }, + } + + const result = convertMCPToolToAdapterFormat(tool) + expect(result.type).toBe('mcp') + expect(result.server_url).toBe('https://example.com/mcp') + }) +}) diff --git a/packages/typescript/openai-base/tests/responses-text.test.ts b/packages/typescript/openai-base/tests/responses-text.test.ts index 2c78cedb2..7c50acc3e 100644 --- a/packages/typescript/openai-base/tests/responses-text.test.ts +++ b/packages/typescript/openai-base/tests/responses-text.test.ts @@ -784,6 +784,99 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { expect(toolEnds[1].input).toEqual({ location: 'Paris' }) } }) + + it('uses call_id instead of internal id for tool call correlation', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-callid', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_item.added', + output_index: 0, + item: { + type: 'function_call', + id: 'fc_internal_001', + call_id: 'call_api_abc123', + name: 'lookup_weather', + }, 
+ }, + { + type: 'response.function_call_arguments.delta', + item_id: 'fc_internal_001', + delta: '{"location":"Tokyo"}', + }, + { + type: 'response.function_call_arguments.done', + item_id: 'fc_internal_001', + arguments: '{"location":"Tokyo"}', + }, + { + type: 'response.completed', + response: { + id: 'resp-callid', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'fc_internal_001', + call_id: 'call_api_abc123', + name: 'lookup_weather', + arguments: '{"location":"Tokyo"}', + }, + ], + usage: { + input_tokens: 10, + output_tokens: 5, + total_tokens: 15, + }, + }, + }, + ] + + setupMockResponsesClient(streamChunks) + const adapter = new OpenAICompatibleResponsesTextAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'test-model', + messages: [{ role: 'user', content: 'Weather in Tokyo?' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // TOOL_CALL_START should use call_id, not internal id + const toolStart = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStart).toBeDefined() + if (toolStart?.type === 'TOOL_CALL_START') { + expect(toolStart.toolCallId).toBe('call_api_abc123') + expect(toolStart.toolCallId).not.toBe('fc_internal_001') + } + + // TOOL_CALL_ARGS should also use call_id + const toolArgs = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgs.length).toBeGreaterThan(0) + if (toolArgs[0]?.type === 'TOOL_CALL_ARGS') { + expect(toolArgs[0].toolCallId).toBe('call_api_abc123') + } + + // TOOL_CALL_END should also use call_id + const toolEnd = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEnd).toBeDefined() + if (toolEnd?.type === 'TOOL_CALL_END') { + expect(toolEnd.toolCallId).toBe('call_api_abc123') + expect(toolEnd.toolCallId).not.toBe('fc_internal_001') + } + }) }) describe('content_part events', () => { diff --git 
a/packages/typescript/openai-base/tests/schema-converter.test.ts b/packages/typescript/openai-base/tests/schema-converter.test.ts index f90f54493..a8fc93bef 100644 --- a/packages/typescript/openai-base/tests/schema-converter.test.ts +++ b/packages/typescript/openai-base/tests/schema-converter.test.ts @@ -158,4 +158,91 @@ describe('makeStructuredOutputCompatible', () => { 'oneOf is not supported in OpenAI structured output schemas', ) }) + + it('should use schema.required as default when originalRequired is not provided', () => { + const schema = { + type: 'object', + properties: { + name: { type: 'string' }, + nickname: { type: 'string' }, + }, + required: ['name'], + } + + // Call without second argument — should use schema.required + const result = makeStructuredOutputCompatible(schema) + expect(result.properties.name.type).toBe('string') + expect(result.properties.nickname.type).toEqual(['string', 'null']) + expect(result.required).toEqual(['name', 'nickname']) + }) + + it('should make optional object properties nullable after recursion', () => { + const schema = { + type: 'object', + properties: { + required_obj: { + type: 'object', + properties: { x: { type: 'string' } }, + required: ['x'], + }, + optional_obj: { + type: 'object', + properties: { y: { type: 'number' } }, + required: ['y'], + }, + }, + required: ['required_obj'], + } + + const result = makeStructuredOutputCompatible(schema, ['required_obj']) + + // required_obj should be recursed into but NOT made nullable + expect(result.properties.required_obj.additionalProperties).toBe(false) + expect(result.properties.required_obj.type).toBe('object') + + // optional_obj should be recursed into AND made nullable + expect(result.properties.optional_obj.additionalProperties).toBe(false) + expect(result.properties.optional_obj.type).toEqual(['object', 'null']) + }) + + it('should make optional array properties nullable after recursion', () => { + const schema = { + type: 'object', + properties: { + tags: { + 
type: 'array', + items: { + type: 'object', + properties: { label: { type: 'string' } }, + required: ['label'], + }, + }, + }, + required: [], + } + + const result = makeStructuredOutputCompatible(schema, []) + + // tags is optional, should be nullable AND have items recursed + expect(result.properties.tags.type).toEqual(['array', 'null']) + expect(result.properties.tags.items.additionalProperties).toBe(false) + }) + + it('should make optional anyOf properties nullable by adding null variant', () => { + const schema = { + type: 'object', + properties: { + value: { + anyOf: [{ type: 'string' }, { type: 'number' }], + }, + }, + required: [], + } + + const result = makeStructuredOutputCompatible(schema, []) + + // optional anyOf should have a null variant added + expect(result.properties.value.anyOf).toContainEqual({ type: 'null' }) + expect(result.properties.value.anyOf).toHaveLength(3) + }) }) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e1f11caba..cb13175bc 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -807,8 +807,8 @@ importers: packages/typescript/ai-fal: dependencies: '@fal-ai/client': - specifier: ^1.9.1 - version: 1.9.1 + specifier: ^1.9.4 + version: 1.9.5 '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils @@ -2534,8 +2534,8 @@ packages: resolution: {integrity: sha512-C3mrr3b5dRVlKPJdfrAXS8+dq+rq8Qm5SNRazca0JKgw1HQERFmrVb0towvMmw5uu8hHKNiQasMaR/tydf3Zsg==} engines: {node: ^20.19.0 || ^22.13.0 || ^23.5.0 || >=24.0.0, npm: '>=10'} - '@fal-ai/client@1.9.1': - resolution: {integrity: sha512-Z6+n9/2sKlDam1wWDfRWmMkLS09e2WhTU9w+2eWL7PuoGmHK43IhwKirk4b3kPC/QPp1g+ymfvhrSiS1SpDr6g==} + '@fal-ai/client@1.9.5': + resolution: {integrity: sha512-knCMOqXapzL5Lsp4Xh/B/VfvbseKgHg2Kt//MjcxN5weF59/26En3zXTPd8pljl4QAr7b62X5EuNCT69MpyjSA==} engines: {node: '>=18.0.0'} '@gerrit0/mini-shiki@3.19.0': @@ -10430,7 +10430,7 @@ snapshots: '@faker-js/faker@10.1.0': {} - '@fal-ai/client@1.9.1': + '@fal-ai/client@1.9.5': dependencies: '@msgpack/msgpack': 
3.1.3 eventsource-parser: 1.1.2 From 012f2a1ce158b2b9de115755bf615eefd12566e3 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 30 Mar 2026 19:02:19 +0200 Subject: [PATCH 14/15] fix: resolve eslint and knip failures from full test suite - Fix unnecessary type assertions in chat-completions-text and responses-text - Fix eslint import order in ai-groq client.ts - Fix unnecessary condition in ai-groq schema-converter combinator recursion - Fix array-type lint error in openai-base provider-options - Remove unused files in ai-grok (tools/index.ts, tool-converter.ts, function-tool.ts) - Remove unused exports (createOpenAIClient, generateId, validateTextProviderOptions, InternalTextProviderOptions) --- .../ai-grok/src/text/text-provider-options.ts | 22 ------------------- .../ai-grok/src/tools/function-tool.ts | 4 ---- .../typescript/ai-grok/src/tools/index.ts | 5 ----- .../ai-grok/src/tools/tool-converter.ts | 1 - .../typescript/ai-groq/src/utils/client.ts | 2 +- .../ai-groq/src/utils/schema-converter.ts | 4 +--- .../typescript/ai-openai/src/utils/client.ts | 17 +------------- .../src/adapters/chat-completions-text.ts | 4 +--- .../src/adapters/responses-text.ts | 2 +- .../openai-base/src/types/provider-options.ts | 2 +- 10 files changed, 6 insertions(+), 57 deletions(-) delete mode 100644 packages/typescript/ai-grok/src/tools/function-tool.ts delete mode 100644 packages/typescript/ai-grok/src/tools/index.ts delete mode 100644 packages/typescript/ai-grok/src/tools/tool-converter.ts diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts index a05222ff1..c0e7480f7 100644 --- a/packages/typescript/ai-grok/src/text/text-provider-options.ts +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -1,5 +1,3 @@ -import type { FunctionTool } from '../tools/function-tool' - /** * Grok Text Provider Options * @@ -51,27 +49,7 @@ export interface GrokTextProviderOptions extends 
GrokBaseOptions { stop?: string | Array } -/** - * Internal options interface for validation - * Used internally by the adapter - */ -export interface InternalTextProviderOptions extends GrokTextProviderOptions { - model: string - stream?: boolean - tools?: Array -} - /** * External provider options (what users pass in) */ export type ExternalTextProviderOptions = GrokTextProviderOptions - -/** - * Validates text provider options - */ -export function validateTextProviderOptions( - _options: InternalTextProviderOptions, -): void { - // Basic validation can be added here if needed - // For now, Grok API will handle validation -} diff --git a/packages/typescript/ai-grok/src/tools/function-tool.ts b/packages/typescript/ai-grok/src/tools/function-tool.ts deleted file mode 100644 index 35e66ff23..000000000 --- a/packages/typescript/ai-grok/src/tools/function-tool.ts +++ /dev/null @@ -1,4 +0,0 @@ -export { - convertFunctionToolToChatCompletionsFormat as convertFunctionToolToAdapterFormat, - type ChatCompletionFunctionTool as FunctionTool, -} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts deleted file mode 100644 index c90334153..000000000 --- a/packages/typescript/ai-grok/src/tools/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -export { - convertFunctionToolToAdapterFormat, - type FunctionTool, -} from './function-tool' -export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-grok/src/tools/tool-converter.ts b/packages/typescript/ai-grok/src/tools/tool-converter.ts deleted file mode 100644 index 315f4e638..000000000 --- a/packages/typescript/ai-grok/src/tools/tool-converter.ts +++ /dev/null @@ -1 +0,0 @@ -export { convertToolsToChatCompletionsFormat as convertToolsToProviderFormat } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts index 
29ab0fb31..4e4f64580 100644 --- a/packages/typescript/ai-groq/src/utils/client.ts +++ b/packages/typescript/ai-groq/src/utils/client.ts @@ -1,6 +1,6 @@ +import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' import Groq_SDK from 'groq-sdk' import type { ClientOptions } from 'groq-sdk' -import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' export interface GroqClientConfig extends ClientOptions { apiKey: string diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts index 3178c9141..b5539cb15 100644 --- a/packages/typescript/ai-groq/src/utils/schema-converter.ts +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -40,9 +40,7 @@ function removeEmptyRequired(schema: Record): Record { for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) { if (Array.isArray(result[keyword])) { result[keyword] = result[keyword].map((entry: Record) => - typeof entry === 'object' && entry !== null - ? 
removeEmptyRequired(entry) - : entry, + removeEmptyRequired(entry), ) } } diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts index c3d34cf29..b14ba44bf 100644 --- a/packages/typescript/ai-openai/src/utils/client.ts +++ b/packages/typescript/ai-openai/src/utils/client.ts @@ -1,5 +1,4 @@ -import OpenAI_SDK from 'openai' -import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' +import { getApiKeyFromEnv } from '@tanstack/ai-utils' import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base' import type { ClientOptions } from 'openai' @@ -7,13 +6,6 @@ export interface OpenAIClientConfig extends ClientOptions { apiKey: string } -/** - * Creates an OpenAI SDK client instance - */ -export function createOpenAIClient(config: OpenAIClientConfig): OpenAI_SDK { - return new OpenAI_SDK(config) -} - /** * Gets OpenAI API key from environment variables * @throws Error if OPENAI_API_KEY is not found @@ -22,13 +14,6 @@ export function getOpenAIApiKeyFromEnv(): string { return getApiKeyFromEnv('OPENAI_API_KEY') } -/** - * Generates a unique ID with a prefix - */ -export function generateId(prefix: string): string { - return _generateId(prefix) -} - /** * Converts an OpenAIClientConfig to OpenAICompatibleClientConfig. 
* This bridges the type gap between the local config type (which extends diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index 3f7a63e0e..153be0788 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -459,9 +459,7 @@ export class OpenAICompatibleChatCompletionsTextAdapter< messages.push(this.convertMessage(message)) } - const modelOptions = options.modelOptions as - | Record - | undefined + const modelOptions = options.modelOptions return { model: options.model, diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index 4b94206b5..197db3965 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -700,7 +700,7 @@ export class OpenAICompatibleResponsesTextAdapter< ) : undefined - const modelOptions = options.modelOptions as Record | undefined + const modelOptions = options.modelOptions return { model: options.model, diff --git a/packages/typescript/openai-base/src/types/provider-options.ts b/packages/typescript/openai-base/src/types/provider-options.ts index 018482e07..54c1bdb3d 100644 --- a/packages/typescript/openai-base/src/types/provider-options.ts +++ b/packages/typescript/openai-base/src/types/provider-options.ts @@ -4,7 +4,7 @@ export interface OpenAICompatibleBaseOptions { max_tokens?: number frequency_penalty?: number presence_penalty?: number - stop?: string | string[] + stop?: string | Array user?: string } From 19231f2d879681e30a23c5694b422a445e9e4d81 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Mon, 30 Mar 2026 17:03:41 +0000 Subject: [PATCH 15/15] ci: apply automated fixes --- 
.../openai-base/src/utils/schema-converter.ts | 3 +-- .../tests/chat-completions-text.test.ts | 20 +++++++------------ 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/packages/typescript/openai-base/src/utils/schema-converter.ts b/packages/typescript/openai-base/src/utils/schema-converter.ts index 1ae39172c..fb0164091 100644 --- a/packages/typescript/openai-base/src/utils/schema-converter.ts +++ b/packages/typescript/openai-base/src/utils/schema-converter.ts @@ -15,8 +15,7 @@ export function makeStructuredOutputCompatible( ): Record { const result = { ...schema } const required = - originalRequired ?? - (Array.isArray(result.required) ? result.required : []) + originalRequired ?? (Array.isArray(result.required) ? result.required : []) if (result.type === 'object' && result.properties) { const properties = { ...result.properties } diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts index 610024221..b89324d8a 100644 --- a/packages/typescript/openai-base/tests/chat-completions-text.test.ts +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -751,9 +751,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { { id: 'chatcmpl-123', model: 'test-model', - choices: [ - { delta: { content: 'Hi' }, finish_reason: null }, - ], + choices: [{ delta: { content: 'Hi' }, finish_reason: null }], }, { id: 'chatcmpl-123', @@ -793,9 +791,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { { id: 'chatcmpl-123', model: 'test-model', - choices: [ - { delta: { content: 'Hi' }, finish_reason: null }, - ], + choices: [{ delta: { content: 'Hi' }, finish_reason: null }], }, { id: 'chatcmpl-123', @@ -831,9 +827,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { it('does not include stream_options in structured output calls', async () => { const nonStreamResponse = { - choices: [ - { message: { content: 
'{"name":"Alice"}' } }, - ], + choices: [{ message: { content: '{"name":"Alice"}' } }], } setupMockSdkClient([], nonStreamResponse) @@ -866,9 +860,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { { id: 'chatcmpl-123', model: 'test-model', - choices: [ - { delta: { content: 'Hi' }, finish_reason: null }, - ], + choices: [{ delta: { content: 'Hi' }, finish_reason: null }], }, { id: 'chatcmpl-123', @@ -900,7 +892,9 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { // Verify second argument contains headers and signal const requestOptions = mockCreate.mock.calls[0]?.[1] expect(requestOptions).toBeDefined() - expect(requestOptions.headers).toEqual({ 'X-Custom-Header': 'test-value' }) + expect(requestOptions.headers).toEqual({ + 'X-Custom-Header': 'test-value', + }) expect(requestOptions.signal).toBe(controller.signal) }) })