Skip to content

Commit c02213c

Browse files
committed
fix #120 : Replace magic numbers with constants in LLM providers
- Defined constants for model names, token limits, temperature settings, API endpoints, and headers in the following modules:
  - Claude
  - Groq
  - Gemini
  - ChatGPT
  - Grok
  - Ollama
- Improved code maintainability and consistency by eliminating hardcoded values.
1 parent 12b8d33 commit c02213c

6 files changed

Lines changed: 63 additions & 22 deletions

File tree

internal/chatgpt/chatgpt.go

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,10 @@ import (
1010
"github.com/dfanso/commit-msg/pkg/types"
1111
)
1212

13+
const (
14+
chatgptModel = openai.ChatModelGPT4o
15+
)
16+
1317
// GenerateCommitMessage calls OpenAI's chat completions API to turn the provided
1418
// repository changes into a polished git commit message.
1519
func GenerateCommitMessage(config *types.Config, changes string, apiKey string, opts *types.GenerationOptions) (string, error) {
@@ -22,7 +26,7 @@ func GenerateCommitMessage(config *types.Config, changes string, apiKey string,
2226
Messages: []openai.ChatCompletionMessageParamUnion{
2327
openai.UserMessage(prompt),
2428
},
25-
Model: openai.ChatModelGPT4o,
29+
Model: chatgptModel,
2630
})
2731
if err != nil {
2832
return "", fmt.Errorf("OpenAI error: %w", err)

internal/claude/claude.go

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,16 @@ import (
1111
"github.com/dfanso/commit-msg/pkg/types"
1212
)
1313

14+
const (
15+
claudeModel = "claude-3-5-sonnet-20241022"
16+
claudeMaxTokens = 200
17+
claudeAPIEndpoint = "https://api.anthropic.com/v1/messages"
18+
claudeAPIVersion = "2023-06-01"
19+
contentTypeJSON = "application/json"
20+
anthropicVersionHeader = "anthropic-version"
21+
xAPIKeyHeader = "x-api-key"
22+
)
23+
1424
// ClaudeRequest describes the payload sent to Anthropic's Claude messages API.
1525
type ClaudeRequest struct {
1626
Model string `json:"model"`
@@ -34,8 +44,8 @@ func GenerateCommitMessage(config *types.Config, changes string, apiKey string,
3444
prompt := types.BuildCommitPrompt(changes, opts)
3545

3646
reqBody := ClaudeRequest{
37-
Model: "claude-3-5-sonnet-20241022",
38-
MaxTokens: 200,
47+
Model: claudeModel,
48+
MaxTokens: claudeMaxTokens,
3949
Messages: []types.Message{
4050
{
4151
Role: "user",
@@ -50,14 +60,14 @@ func GenerateCommitMessage(config *types.Config, changes string, apiKey string,
5060
}
5161

5262
ctx := context.Background()
53-
req, err := http.NewRequestWithContext(ctx, "POST", "https://api.anthropic.com/v1/messages", bytes.NewBuffer(jsonData))
63+
req, err := http.NewRequestWithContext(ctx, "POST", claudeAPIEndpoint, bytes.NewBuffer(jsonData))
5464
if err != nil {
5565
return "", err
5666
}
5767

58-
req.Header.Set("Content-Type", "application/json")
59-
req.Header.Set("x-api-key", apiKey)
60-
req.Header.Set("anthropic-version", "2023-06-01")
68+
req.Header.Set("Content-Type", contentTypeJSON)
69+
req.Header.Set(xAPIKeyHeader, apiKey)
70+
req.Header.Set(anthropicVersionHeader, claudeAPIVersion)
6171

6272
client := httpClient.GetClient()
6373
resp, err := client.Do(req)

internal/gemini/gemini.go

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,11 @@ import (
1010
"github.com/dfanso/commit-msg/pkg/types"
1111
)
1212

13+
const (
14+
geminiModel = "gemini-2.0-flash"
15+
geminiTemperature = 0.2
16+
)
17+
1318
// GenerateCommitMessage asks Google Gemini to author a commit message for the
1419
// supplied repository changes and optional style instructions.
1520
func GenerateCommitMessage(config *types.Config, changes string, apiKey string, opts *types.GenerationOptions) (string, error) {
@@ -25,8 +30,8 @@ func GenerateCommitMessage(config *types.Config, changes string, apiKey string,
2530
defer client.Close()
2631

2732
// Create a GenerativeModel with appropriate settings
28-
model := client.GenerativeModel("gemini-2.0-flash")
29-
model.SetTemperature(0.2) // Lower temperature for more focused responses
33+
model := client.GenerativeModel(geminiModel)
34+
model.SetTemperature(geminiTemperature) // Lower temperature for more focused responses
3035

3136
// Generate content using the prompt
3237
resp, err := model.GenerateContent(ctx, genai.Text(prompt))

internal/grok/grok.go

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,14 @@ import (
1111
"github.com/dfanso/commit-msg/pkg/types"
1212
)
1313

14+
const (
15+
grokModel = "grok-3-mini-fast-beta"
16+
grokTemperature = 0
17+
grokAPIEndpoint = "https://api.x.ai/v1/chat/completions"
18+
grokContentType = "application/json"
19+
authorizationPrefix = "Bearer "
20+
)
21+
1422
// GenerateCommitMessage calls X.AI's Grok API to create a commit message from
1523
// the provided Git diff and generation options.
1624
func GenerateCommitMessage(config *types.Config, changes string, apiKey string, opts *types.GenerationOptions) (string, error) {
@@ -25,9 +33,9 @@ func GenerateCommitMessage(config *types.Config, changes string, apiKey string,
2533
Content: prompt,
2634
},
2735
},
28-
Model: "grok-3-mini-fast-beta",
36+
Model: grokModel,
2937
Stream: false,
30-
Temperature: 0,
38+
Temperature: grokTemperature,
3139
}
3240

3341
requestBody, err := json.Marshal(request)
@@ -36,14 +44,14 @@ func GenerateCommitMessage(config *types.Config, changes string, apiKey string,
3644
}
3745

3846
// Create HTTP request
39-
req, err := http.NewRequest("POST", "https://api.x.ai/v1/chat/completions", bytes.NewBuffer(requestBody))
47+
req, err := http.NewRequest("POST", grokAPIEndpoint, bytes.NewBuffer(requestBody))
4048
if err != nil {
4149
return "", err
4250
}
4351

4452
// Set headers
45-
req.Header.Set("Content-Type", "application/json")
46-
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey))
53+
req.Header.Set("Content-Type", grokContentType)
54+
req.Header.Set("Authorization", fmt.Sprintf("%s%s", authorizationPrefix, apiKey))
4755

4856
client := httpClient.GetClient()
4957
resp, err := client.Do(req)

internal/groq/groq.go

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,14 @@ type chatResponse struct {
3636
// If Groq updates their defaults again, override via GROQ_MODEL.
3737
const defaultModel = "llama-3.3-70b-versatile"
3838

39+
const (
40+
groqTemperature = 0.2
41+
groqMaxTokens = 200
42+
groqSystemMessage = "You are an assistant that writes clear, concise git commit messages."
43+
groqContentType = "application/json"
44+
groqAuthorizationPrefix = "Bearer "
45+
)
46+
3947
var (
4048
// allow overrides in tests
4149
baseURL = "https://api.groq.com/openai/v1/chat/completions"
@@ -62,10 +70,10 @@ func GenerateCommitMessage(_ *types.Config, changes string, apiKey string, opts
6270

6371
payload := chatRequest{
6472
Model: model,
65-
Temperature: 0.2,
66-
MaxTokens: 200,
73+
Temperature: groqTemperature,
74+
MaxTokens: groqMaxTokens,
6775
Messages: []chatMessage{
68-
{Role: "system", Content: "You are an assistant that writes clear, concise git commit messages."},
76+
{Role: "system", Content: groqSystemMessage},
6977
{Role: "user", Content: prompt},
7078
},
7179
}
@@ -85,8 +93,8 @@ func GenerateCommitMessage(_ *types.Config, changes string, apiKey string, opts
8593
return "", fmt.Errorf("failed to create Groq request: %w", err)
8694
}
8795

88-
req.Header.Set("Content-Type", "application/json")
89-
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey))
96+
req.Header.Set("Content-Type", groqContentType)
97+
req.Header.Set("Authorization", fmt.Sprintf("%s%s", groqAuthorizationPrefix, apiKey))
9098

9199
resp, err := httpClient.Do(req)
92100
if err != nil {

internal/ollama/ollama.go

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,12 @@ import (
1111
"github.com/dfanso/commit-msg/pkg/types"
1212
)
1313

14+
const (
15+
ollamaDefaultModel = "llama3:latest"
16+
ollamaStream = false
17+
ollamaContentType = "application/json"
18+
)
19+
1420
// OllamaRequest captures the prompt payload sent to an Ollama HTTP endpoint.
1521
type OllamaRequest struct {
1622
Model string `json:"model"`
@@ -28,7 +34,7 @@ type OllamaResponse struct {
2834
func GenerateCommitMessage(_ *types.Config, changes string, url string, model string, opts *types.GenerationOptions) (string, error) {
2935
// Use llama3:latest as the default model
3036
if model == "" {
31-
model = "llama3:latest"
37+
model = ollamaDefaultModel
3238
}
3339

3440
// Preparing the prompt
@@ -38,7 +44,7 @@ func GenerateCommitMessage(_ *types.Config, changes string, url string, model st
3844
reqBody := map[string]interface{}{
3945
"model": model,
4046
"prompt": prompt,
41-
"stream": false,
47+
"stream": ollamaStream,
4248
}
4349

4450
// Generating the body
@@ -51,7 +57,7 @@ func GenerateCommitMessage(_ *types.Config, changes string, url string, model st
5157
if err != nil {
5258
return "", fmt.Errorf("failed to create request: %v", err)
5359
}
54-
req.Header.Set("Content-Type", "application/json")
60+
req.Header.Set("Content-Type", ollamaContentType)
5561

5662
resp, err := httpClient.GetOllamaClient().Do(req)
5763
if err != nil {

0 commit comments

Comments (0)