Skip to content

Commit 96afcfb

Browse files
committed
Progress
1 parent 5522d6a commit 96afcfb

6 files changed

Lines changed: 36 additions & 32 deletions

File tree

src/api/providers/pearai/pearai.ts

Lines changed: 3 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
6767
}
6868
const data = (await response.json()) as PearAiModelsResponse
6969
const underlyingModel = data.models[modelId]?.underlyingModelUpdated || "claude-3-5-sonnet-20241022"
70-
if (underlyingModel.startsWith("claude")) {
70+
if (underlyingModel.startsWith("claude") || modelId.startsWith("anthropic/")) {
7171
// Default to Claude
7272
this.handler = new AnthropicHandler({
7373
...options,
@@ -93,7 +93,7 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
9393
apiModelId: "claude-3-5-sonnet-20241022",
9494
})
9595
}
96-
} else if (modelId.startsWith("claude")) {
96+
} else if (modelId.startsWith("claude") || modelId.startsWith("anthropic/")) {
9797
this.handler = new AnthropicHandler({
9898
...options,
9999
apiKey: options.pearaiApiKey,
@@ -111,20 +111,7 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
111111

112112
getModel(): { id: string; info: ModelInfo } {
113113
const baseModel = this.handler.getModel()
114-
return {
115-
id: baseModel.id,
116-
info: {
117-
...baseModel.info,
118-
// Inherit all capabilities from the underlying model
119-
supportsImages: baseModel.info.supportsImages,
120-
supportsComputerUse: baseModel.info.supportsComputerUse,
121-
supportsPromptCache: baseModel.info.supportsPromptCache,
122-
inputPrice: baseModel.info.inputPrice || 0,
123-
outputPrice: baseModel.info.outputPrice || 0,
124-
cacheWritesPrice: baseModel.info.cacheWritesPrice ? baseModel.info.cacheWritesPrice : undefined,
125-
cacheReadsPrice: baseModel.info.cacheReadsPrice ? baseModel.info.cacheReadsPrice : undefined,
126-
},
127-
}
114+
return baseModel
128115
}
129116

130117
async *createMessage(systemPrompt: string, messages: any[]): AsyncGenerator<any> {

src/api/providers/pearai/pearaiGeneric.ts

Lines changed: 26 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ import { ApiStream, ApiStreamUsageChunk } from "../../transform/stream"
1616
import { BaseProvider } from "../base-provider"
1717
import { XmlMatcher } from "../../../utils/xml-matcher"
1818
import { allModels, pearAiDefaultModelId, pearAiDefaultModelInfo } from "../../../shared/pearaiApi"
19+
import { calculateApiCostOpenAI } from "../../../utils/cost"
1920

2021
const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
2122

@@ -65,6 +66,9 @@ export class PearAIGenericHandler extends BaseProvider implements SingleCompleti
6566
const modelUrl = this.options.openAiBaseUrl ?? ""
6667
const modelId = this.options.openAiModelId ?? ""
6768

69+
console.dir("MODEL INFO")
70+
console.dir(modelInfo)
71+
console.dir(modelId)
6872
const deepseekReasoner = modelId.includes("deepseek-reasoner")
6973
const ark = modelUrl.includes(".volces.com")
7074

@@ -198,21 +202,36 @@ export class PearAIGenericHandler extends BaseProvider implements SingleCompleti
198202
}
199203

200204
protected processUsageMetrics(usage: any, modelInfo?: ModelInfo): ApiStreamUsageChunk {
201-
console.dir(usage?.prompt_tokens_details)
205+
const inputTokens = usage?.prompt_tokens || 0
206+
const outputTokens = usage?.completion_tokens || 0
207+
const cacheWriteTokens = usage?.prompt_tokens_details?.caching_tokens || 0
208+
const cacheReadTokens = usage?.prompt_tokens_details?.cached_tokens || 0
209+
const totalCost = modelInfo
210+
? calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens)
211+
: 0
212+
213+
console.dir("COST")
214+
console.log(totalCost)
215+
console.dir("MODEL")
216+
console.dir(modelInfo)
202217
return {
203218
type: "usage",
204-
inputTokens: usage?.prompt_tokens || 0,
205-
outputTokens: usage?.completion_tokens || 0,
206-
cacheWriteTokens: usage?.prompt_tokens_details?.cache_miss_tokens,
207-
cacheReadTokens: usage?.prompt_tokens_details?.cached_tokens,
219+
inputTokens: inputTokens,
220+
outputTokens: outputTokens,
221+
cacheWriteTokens: cacheWriteTokens,
222+
cacheReadTokens: cacheReadTokens,
223+
totalCost: totalCost,
208224
}
209225
}
210226

211227
override getModel(): { id: string; info: ModelInfo } {
212-
const modelId = this.options.openAiModelId ?? pearAiDefaultModelId
228+
const modelId = this.options.openAiModelId ?? "none"
229+
console.log("MODEL INFO IN GETMODEL", allModels[modelId])
230+
console.log("Available models:", Object.keys(allModels))
231+
console.log("Keys: ", Object.keys(allModels[modelId]))
213232
return {
214233
id: modelId,
215-
info: allModels[modelId] ?? pearAiDefaultModelInfo,
234+
info: allModels[modelId],
216235
}
217236
}
218237

src/shared/api.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -772,8 +772,8 @@ export const geminiModels = {
772772
contextWindow: 1_048_576,
773773
supportsImages: true,
774774
supportsPromptCache: false,
775-
inputPrice: 0,
776-
outputPrice: 0,
775+
inputPrice: 0.15,
776+
outputPrice: 0.6,
777777
},
778778
"gemini-2.0-flash-lite-preview-02-05": {
779779
maxTokens: 8192,

src/shared/pearaiApi.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ export const allModels: { [key: string]: ModelInfo } = {
135135
...Object.entries(pearAiModels).reduce(
136136
(acc, [key, value]) => ({
137137
...acc,
138-
[`{key}`]: value,
138+
[key]: value,
139139
}),
140140
{},
141141
),

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ import { validateApiConfiguration, validateModelId, validateBedrockArn } from "@
5656
import { ApiErrorMessage } from "./ApiErrorMessage"
5757
import { ThinkingBudget } from "./ThinkingBudget"
5858
import { usePearAiModels } from "../../hooks/usePearAiModels"
59-
import { pearAiDefaultModelId, pearAiDefaultModelInfo } from "../../../../src/shared/pearaiApi"
59+
import { allModels, pearAiDefaultModelId, pearAiDefaultModelInfo } from "../../../../src/shared/pearaiApi"
6060

6161
interface ApiOptionsProps {
6262
uriScheme: string | undefined
@@ -1690,10 +1690,9 @@ export function normalizeApiConfiguration(
16901690
}
16911691
case "pearai": {
16921692
// Always use the models from the hook which are fetched when provider is selected
1693-
return getProviderData(
1694-
pearAiModelsQuery || { [pearAiDefaultModelId]: pearAiDefaultModelInfo },
1695-
pearAiDefaultModelId,
1696-
)
1693+
let query = pearAiModelsQuery
1694+
console.log("query", query)
1695+
return getProviderData(pearAiModelsQuery || allModels, pearAiDefaultModelId)
16971696
}
16981697
default:
16991698
return getProviderData(anthropicModels, anthropicDefaultModelId)

webview-ui/src/components/settings/constants.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ import {
77
geminiModels,
88
mistralModels,
99
openAiNativeModels,
10-
pearAiModels,
1110
vertexModels,
1211
} from "../../../../src/shared/api"
1312

0 commit comments

Comments (0)