Commit 781ed1e

feat: add Kimi K2 thinking model to Fireworks AI provider (#9202)

Co-authored-by: Roo Code <roomote@roocode.com>

1 parent 741b268 · commit 781ed1e

3 files changed

Lines changed: 118 additions & 8 deletions

packages/types/src/providers/fireworks.ts

Lines changed: 16 additions & 0 deletions

```diff
@@ -3,6 +3,7 @@ import type { ModelInfo } from "../model.js"
 export type FireworksModelId =
 	| "accounts/fireworks/models/kimi-k2-instruct"
 	| "accounts/fireworks/models/kimi-k2-instruct-0905"
+	| "accounts/fireworks/models/kimi-k2-thinking"
 	| "accounts/fireworks/models/minimax-m2"
 	| "accounts/fireworks/models/qwen3-235b-a22b-instruct-2507"
 	| "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct"
@@ -43,6 +44,21 @@ export const fireworksModels = {
 		description:
 			"Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities.",
 	},
+	"accounts/fireworks/models/kimi-k2-thinking": {
+		maxTokens: 16000,
+		contextWindow: 256000,
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsNativeTools: true,
+		supportsTemperature: true,
+		preserveReasoning: true,
+		defaultTemperature: 1.0,
+		inputPrice: 0.6,
+		outputPrice: 2.5,
+		cacheReadsPrice: 0.15,
+		description:
+			"The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.",
+	},
 	"accounts/fireworks/models/minimax-m2": {
 		maxTokens: 4096,
 		contextWindow: 204800,
```
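For a sense of what these numbers mean in practice: Roo Code model metadata quotes `inputPrice`, `outputPrice`, and `cacheReadsPrice` in USD per million tokens, so a rough per-request cost for the new model can be sketched as below. This is a minimal illustration only; `estimateCostUSD` is a hypothetical helper, not part of this commit, and the `@roo-code/types` import path is assumed.

```typescript
import { fireworksModels } from "@roo-code/types" // assumed import path

// Hypothetical helper (not part of this commit): estimate the USD cost of a
// single request, assuming prices are quoted per million tokens.
function estimateCostUSD(inputTokens: number, outputTokens: number, cachedTokens = 0): number {
	const info = fireworksModels["accounts/fireworks/models/kimi-k2-thinking"]
	const uncachedInput = inputTokens - cachedTokens
	return (
		(uncachedInput * (info.inputPrice ?? 0) +
			cachedTokens * (info.cacheReadsPrice ?? 0) +
			outputTokens * (info.outputPrice ?? 0)) /
		1_000_000
	)
}

// e.g. a 200k-token prompt (half served from cache) plus a 16k-token reply:
console.log(estimateCostUSD(200_000, 16_000, 100_000)) // ≈ $0.115
```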

src/api/providers/__tests__/fireworks.spec.ts

Lines changed: 101 additions & 7 deletions

```diff
@@ -115,6 +115,31 @@ describe("FireworksHandler", () => {
 		)
 	})

+	it("should return Kimi K2 Thinking model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16000,
+				contextWindow: 256000,
+				supportsImages: false,
+				supportsPromptCache: true,
+				supportsNativeTools: true,
+				supportsTemperature: true,
+				preserveReasoning: true,
+				defaultTemperature: 1.0,
+				inputPrice: 0.6,
+				outputPrice: 2.5,
+				cacheReadsPrice: 0.15,
+			}),
+		)
+	})
+
 	it("should return MiniMax M2 model with correct configuration", () => {
 		const testModelId: FireworksModelId = "accounts/fireworks/models/minimax-m2"
 		const handlerWithModel = new FireworksHandler({
@@ -424,16 +449,85 @@ describe("FireworksHandler", () => {
 		)
 	})

-	it("should use default temperature of 0.5", () => {
-		const testModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"
+	it("should use provider default temperature of 0.5 for models without defaultTemperature", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"
 		const handlerWithModel = new FireworksHandler({
-			apiModelId: testModelId,
+			apiModelId: modelId,
 			fireworksApiKey: "test-fireworks-api-key",
 		})
-		const model = handlerWithModel.getModel()
-		// The temperature is set in the constructor as defaultTemperature: 0.5
-		// This test verifies the handler is configured with the correct default temperature
-		expect(handlerWithModel).toBeDefined()
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 0.5,
+			}),
+			undefined,
+		)
+	})
+
+	it("should use model defaultTemperature (1.0) over provider default (0.5) for kimi-k2-thinking", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: modelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		// Model's defaultTemperature (1.0) should take precedence over provider's default (0.5)
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 1.0,
+			}),
+			undefined,
+		)
+	})
+
+	it("should use user-specified temperature over model and provider defaults", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: modelId,
+			fireworksApiKey: "test-fireworks-api-key",
+			modelTemperature: 0.7,
+		})
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		// User-specified temperature should take precedence over everything
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 0.7,
+			}),
+			undefined,
+		)
 	})

 	it("should handle empty response in completePrompt", async () => {
```

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -84,7 +84,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 				format: "openai",
 			}) ?? undefined

-		const temperature = this.options.modelTemperature ?? this.defaultTemperature
+		const temperature = this.options.modelTemperature ?? info.defaultTemperature ?? this.defaultTemperature

 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model,
```
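This one-line change is what the new tests exercise: an explicit user setting wins, then the model's own `defaultTemperature`, then the provider-wide fallback of 0.5. A minimal sketch of the nullish-coalescing precedence chain, with names mirroring the diff but otherwise standalone:

```typescript
interface ModelInfo {
	defaultTemperature?: number
}

// Resolve temperature with the same precedence as the diff above:
// user override ?? model default ?? provider default.
function resolveTemperature(
	userTemperature: number | undefined,
	info: ModelInfo,
	providerDefault: number,
): number {
	return userTemperature ?? info.defaultTemperature ?? providerDefault
}

console.log(resolveTemperature(undefined, {}, 0.5)) // 0.5 (provider default)
console.log(resolveTemperature(undefined, { defaultTemperature: 1.0 }, 0.5)) // 1.0 (model default)
console.log(resolveTemperature(0.7, { defaultTemperature: 1.0 }, 0.5)) // 0.7 (user wins)
```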
