@@ -16,6 +16,7 @@ import { ApiStream, ApiStreamUsageChunk } from "../../transform/stream"
 import { BaseProvider } from "../base-provider"
 import { XmlMatcher } from "../../../utils/xml-matcher"
 import { allModels, pearAiDefaultModelId, pearAiDefaultModelInfo } from "../../../shared/pearaiApi"
+import { calculateApiCostOpenAI } from "../../../utils/cost"
 
 const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
 
@@ -65,6 +66,9 @@ export class PearAIGenericHandler extends BaseProvider implements SingleCompleti
 		const modelUrl = this.options.openAiBaseUrl ?? ""
 		const modelId = this.options.openAiModelId ?? ""
 
+		console.dir("MODEL INFO")
+		console.dir(modelInfo)
+		console.dir(modelId)
 		const deepseekReasoner = modelId.includes("deepseek-reasoner")
 		const ark = modelUrl.includes(".volces.com")
 
@@ -198,21 +202,36 @@ export class PearAIGenericHandler extends BaseProvider implements SingleCompleti
 	}
 
 	protected processUsageMetrics(usage: any, modelInfo?: ModelInfo): ApiStreamUsageChunk {
-		console.dir(usage?.prompt_tokens_details)
+		const inputTokens = usage?.prompt_tokens || 0
+		const outputTokens = usage?.completion_tokens || 0
+		const cacheWriteTokens = usage?.prompt_tokens_details?.caching_tokens || 0
+		const cacheReadTokens = usage?.prompt_tokens_details?.cached_tokens || 0
+		const totalCost = modelInfo
+			? calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens)
+			: 0
+
+		console.dir("COST")
+		console.log(totalCost)
+		console.dir("MODEL")
+		console.dir(modelInfo)
 		return {
 			type: "usage",
-			inputTokens: usage?.prompt_tokens || 0,
-			outputTokens: usage?.completion_tokens || 0,
-			cacheWriteTokens: usage?.prompt_tokens_details?.cache_miss_tokens,
-			cacheReadTokens: usage?.prompt_tokens_details?.cached_tokens,
+			inputTokens: inputTokens,
+			outputTokens: outputTokens,
+			cacheWriteTokens: cacheWriteTokens,
+			cacheReadTokens: cacheReadTokens,
+			totalCost: totalCost,
 		}
 	}
 
 	override getModel(): { id: string; info: ModelInfo } {
-		const modelId = this.options.openAiModelId ?? pearAiDefaultModelId
+		const modelId = this.options.openAiModelId ?? "none"
+		console.log("MODEL INFO IN GETMODEL", allModels[modelId])
+		console.log("Available models:", Object.keys(allModels))
+		console.log("Keys: ", Object.keys(allModels[modelId]))
 		return {
 			id: modelId,
-			info: allModels[modelId] ?? pearAiDefaultModelInfo,
+			info: allModels[modelId],
 		}
 	}
 
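For context, here is a minimal sketch of the OpenAI-style cost arithmetic that the new `processUsageMetrics` delegates to `calculateApiCostOpenAI`. This is an assumption about the helper's behavior, not the actual implementation in `../../../utils/cost`: it assumes `prompt_tokens` includes the cache write/read tokens and that pricing fields (`inputPrice`, `outputPrice`, `cacheWritesPrice`, `cacheReadsPrice`, in USD per million tokens) live on the model info.

```typescript
// Sketch only: assumed shape of the OpenAI-style cost helper used above.
// Field names and per-million pricing units are assumptions.
interface ModelInfoLike {
	inputPrice?: number
	outputPrice?: number
	cacheWritesPrice?: number
	cacheReadsPrice?: number
}

function calculateApiCostOpenAISketch(
	info: ModelInfoLike,
	inputTokens: number,
	outputTokens: number,
	cacheWriteTokens = 0,
	cacheReadTokens = 0,
): number {
	// OpenAI-style usage reports prompt_tokens inclusive of cached tokens,
	// so bill the cached portions at their own rates and only the remainder
	// at the regular input rate.
	const uncachedInput = Math.max(0, inputTokens - cacheWriteTokens - cacheReadTokens)
	const inputCost = ((info.inputPrice ?? 0) / 1_000_000) * uncachedInput
	const cacheWriteCost = ((info.cacheWritesPrice ?? 0) / 1_000_000) * cacheWriteTokens
	const cacheReadCost = ((info.cacheReadsPrice ?? 0) / 1_000_000) * cacheReadTokens
	const outputCost = ((info.outputPrice ?? 0) / 1_000_000) * outputTokens
	return inputCost + cacheWriteCost + cacheReadCost + outputCost
}
```

Under those assumptions, a model priced at $3/$15 per million input/output tokens with 1,000 prompt tokens (200 of them cached reads) and 500 completion tokens would come out to roughly $0.01, which is the kind of value `totalCost` now carries through the usage chunk.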