Skip to content

Commit e852039

Browse files
committed
Merge branch 'main' into dev
2 parents 15c5f35 + 9739e70 commit e852039

9 files changed

Lines changed: 872 additions & 89 deletions

File tree

README.md

Lines changed: 55 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,13 +29,14 @@ Looking to contribute? Check out:
2929

3030
**AI-Powered Commit Messages** - Automatically generate meaningful commit messages
3131
🔄 **Multiple LLM Support** - Choose between Google Gemini, Grok, Groq, Claude, ChatGPT, or Ollama (local)
32+
🧪 **Dry Run Mode** - Preview prompts without making API calls
3233
📝 **Context-Aware** - Analyzes staged and unstaged changes
3334
📋 **Auto-Copy to Clipboard** - Generated messages are automatically copied for instant use
3435
🎛️ **Interactive Review Flow** - Accept, regenerate with new styles, or open the message in your editor before committing
3536
📊 **File Statistics Display** - Visual preview of changed files and line counts
36-
**Smart Security Scrubbing** - Automatically removes API keys, passwords, and sensitive data from diffs
37-
🚀 **Easy to Use** - Simple CLI interface with beautiful terminal UI
38-
**Fast** - Quick generation of commit messages
37+
💡 **Smart Security Scrubbing** - Automatically removes API keys, passwords, and sensitive data from diffs
38+
🚀 **Easy to Use** - Simple CLI interface with beautiful terminal UI
39+
**Fast** - Quick generation of commit messages
3940

4041
## Supported LLM Providers
4142

@@ -112,6 +113,57 @@ Or if running from source:
112113
go run cmd/commit-msg/main.go .
113114
```
114115

116+
### Preview Mode (Dry Run)
117+
118+
Preview what would be sent to the LLM without making an API call:
119+
120+
```bash
121+
commit . --dry-run
122+
```
123+
124+
This displays:
125+
- The LLM provider that would be used
126+
- The exact prompt that would be sent
127+
- File statistics and change summary
128+
- Estimated token count
129+
- All without consuming API credits or sharing data
130+
131+
Perfect for:
132+
- 🐛 **Debugging** - See exactly what prompt is being sent
133+
- 💰 **Cost Control** - Review before consuming API credits
134+
- 🔒 **Privacy** - Verify what data would be shared with external APIs
135+
- 🧪 **Development** - Test prompt changes without API calls
136+
137+
### Auto Commit Mode
138+
139+
Automatically commit with the generated message without manual confirmation:
140+
141+
```bash
142+
commit . --auto
143+
```
144+
145+
This will:
146+
- Generate the commit message using your configured LLM
147+
- Automatically execute `git commit` with the generated message
148+
- Skip the interactive review and manual confirmation step
149+
150+
**Note**: The `--auto` flag cannot be combined with `--dry-run`. Dry run mode takes precedence and will only preview without committing.
151+
152+
**Platform Support**: Works on Linux, macOS, and Windows.
153+
154+
### Combining Flags
155+
156+
```bash
157+
# Preview only (no commit, no API call)
158+
commit . --dry-run
159+
160+
# Generate and auto-commit
161+
commit . --auto
162+
163+
# Generate with interactive review (default behavior)
164+
commit .
165+
```
166+
115167
### Setup LLM and API Key
116168

117169
```bash

cmd/cli/createMsg.go

Lines changed: 186 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
package cmd
22

33
import (
4+
"context"
45
"errors"
56
"fmt"
67
"os"
@@ -10,14 +11,9 @@ import (
1011

1112
"github.com/atotto/clipboard"
1213
"github.com/dfanso/commit-msg/cmd/cli/store"
13-
"github.com/dfanso/commit-msg/internal/chatgpt"
14-
"github.com/dfanso/commit-msg/internal/claude"
1514
"github.com/dfanso/commit-msg/internal/display"
16-
"github.com/dfanso/commit-msg/internal/gemini"
1715
"github.com/dfanso/commit-msg/internal/git"
18-
"github.com/dfanso/commit-msg/internal/grok"
19-
"github.com/dfanso/commit-msg/internal/groq"
20-
"github.com/dfanso/commit-msg/internal/ollama"
16+
"github.com/dfanso/commit-msg/internal/llm"
2117
"github.com/dfanso/commit-msg/internal/stats"
2218
"github.com/dfanso/commit-msg/pkg/types"
2319
"github.com/google/shlex"
@@ -26,7 +22,8 @@ import (
2622

2723
// CreateCommitMsg launches the interactive flow for reviewing, regenerating,
2824
// editing, and accepting AI-generated commit messages in the current repo.
29-
func CreateCommitMsg() {
25+
// If dryRun is true, it displays the prompt without making an API call.
26+
func CreateCommitMsg(dryRun bool, autoCommit bool) {
3027
// Validate COMMIT_LLM and required API keys
3128
useLLM, err := store.DefaultLLMKey()
3229
if err != nil {
@@ -94,6 +91,24 @@ func CreateCommitMsg() {
9491
return
9592
}
9693

94+
// Handle dry-run mode: display what would be sent to LLM without making API call
95+
if dryRun {
96+
pterm.Println()
97+
displayDryRunInfo(commitLLM, config, changes, apiKey)
98+
return
99+
}
100+
101+
ctx := context.Background()
102+
103+
providerInstance, err := llm.NewProvider(commitLLM, llm.ProviderOptions{
104+
Credential: apiKey,
105+
Config: config,
106+
})
107+
if err != nil {
108+
displayProviderError(commitLLM, err)
109+
os.Exit(1)
110+
}
111+
97112
pterm.Println()
98113
spinnerGenerating, err := pterm.DefaultSpinner.
99114
WithSequence("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏").
@@ -104,7 +119,7 @@ func CreateCommitMsg() {
104119
}
105120

106121
attempt := 1
107-
commitMsg, err := generateMessage(commitLLM, config, changes, apiKey, withAttempt(nil, attempt))
122+
commitMsg, err := generateMessage(ctx, providerInstance, changes, withAttempt(nil, attempt))
108123
if err != nil {
109124
spinnerGenerating.Fail("Failed to generate commit message")
110125
displayProviderError(commitLLM, err)
@@ -166,7 +181,7 @@ interactionLoop:
166181
pterm.Error.Printf("Failed to start spinner: %v\n", err)
167182
continue
168183
}
169-
updatedMessage, genErr := generateMessage(commitLLM, config, changes, apiKey, generationOpts)
184+
updatedMessage, genErr := generateMessage(ctx, providerInstance, changes, generationOpts)
170185
if genErr != nil {
171186
spinner.Fail("Regeneration failed")
172187
displayProviderError(commitLLM, genErr)
@@ -200,6 +215,38 @@ interactionLoop:
200215

201216
pterm.Println()
202217
display.ShowChangesPreview(fileStats)
218+
219+
// Auto-commit if flag is set (cross-platform compatible)
220+
if autoCommit && !dryRun {
221+
pterm.Println()
222+
spinner, err := pterm.DefaultSpinner.
223+
WithSequence("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏").
224+
Start("Automatically committing with generated message...")
225+
if err != nil {
226+
pterm.Error.Printf("Failed to start spinner: %v\n", err)
227+
return
228+
}
229+
230+
cmd := exec.Command("git", "commit", "-m", finalMessage)
231+
cmd.Dir = currentDir
232+
// Ensure git command works across all platforms
233+
cmd.Env = os.Environ()
234+
235+
output, err := cmd.CombinedOutput()
236+
if err != nil {
237+
spinner.Fail("Commit failed")
238+
pterm.Error.Printf("Failed to commit: %v\n", err)
239+
if len(output) > 0 {
240+
pterm.Error.Println(string(output))
241+
}
242+
return
243+
}
244+
245+
spinner.Success("Committed successfully!")
246+
if len(output) > 0 {
247+
pterm.Info.Println(strings.TrimSpace(string(output)))
248+
}
249+
}
203250
}
204251

205252
type styleOption struct {
@@ -227,32 +274,24 @@ var (
227274
errSelectionCancelled = errors.New("selection cancelled")
228275
)
229276

230-
func generateMessage(provider types.LLMProvider, config *types.Config, changes string, apiKey string, opts *types.GenerationOptions) (string, error) {
231-
switch provider {
232-
case types.ProviderGemini:
233-
return gemini.GenerateCommitMessage(config, changes, apiKey, opts)
234-
case types.ProviderOpenAI:
235-
return chatgpt.GenerateCommitMessage(config, changes, apiKey, opts)
236-
case types.ProviderClaude:
237-
return claude.GenerateCommitMessage(config, changes, apiKey, opts)
238-
case types.ProviderGroq:
239-
return groq.GenerateCommitMessage(config, changes, apiKey, opts)
240-
case types.ProviderOllama:
241-
url := apiKey
242-
if strings.TrimSpace(url) == "" {
243-
url = os.Getenv("OLLAMA_URL")
244-
if url == "" {
245-
url = "http://localhost:11434/api/generate"
246-
}
277+
// resolveOllamaConfig returns the URL and model for Ollama, using environment variables as fallbacks.
// Resolution order for the URL: explicit apiKey value, then OLLAMA_URL, then the
// default local endpoint. The model comes from OLLAMA_MODEL, defaulting to llama3.1.
func resolveOllamaConfig(apiKey string) (url, model string) {
	url = apiKey
	if strings.TrimSpace(url) == "" {
		// No usable explicit URL: fall back to the environment, then the default endpoint.
		if url = os.Getenv("OLLAMA_URL"); url == "" {
			url = "http://localhost:11434/api/generate"
		}
	}
	if model = os.Getenv("OLLAMA_MODEL"); model == "" {
		model = "llama3.1"
	}
	return url, model
}
}
292+
293+
// generateMessage asks the configured LLM provider to produce a commit
// message for the given change diff. It is a thin wrapper around
// Provider.Generate so every call site (initial generation and the
// regeneration loop) goes through a single entry point.
func generateMessage(ctx context.Context, provider llm.Provider, changes string, opts *types.GenerationOptions) (string, error) {
	return provider.Generate(ctx, changes, opts)
}
257296

258297
func promptActionSelection() (string, error) {
@@ -410,6 +449,11 @@ func withAttempt(styleOpts *types.GenerationOptions, attempt int) *types.Generat
410449
}
411450

412451
func displayProviderError(provider types.LLMProvider, err error) {
452+
if errors.Is(err, llm.ErrMissingCredential) {
453+
displayMissingCredentialHint(provider)
454+
return
455+
}
456+
413457
switch provider {
414458
case types.ProviderGemini:
415459
pterm.Error.Printf("Gemini API error: %v. Check your GEMINI_API_KEY environment variable or run: commit llm setup\n", err)
@@ -421,7 +465,115 @@ func displayProviderError(provider types.LLMProvider, err error) {
421465
pterm.Error.Printf("Groq API error: %v. Check your GROQ_API_KEY environment variable or run: commit llm setup\n", err)
422466
case types.ProviderGrok:
423467
pterm.Error.Printf("Grok API error: %v. Check your GROK_API_KEY environment variable or run: commit llm setup\n", err)
468+
case types.ProviderOllama:
469+
pterm.Error.Printf("Ollama error: %v. Verify the Ollama service URL or run: commit llm setup\n", err)
470+
default:
471+
pterm.Error.Printf("LLM error: %v\n", err)
472+
}
473+
}
474+
475+
func displayMissingCredentialHint(provider types.LLMProvider) {
476+
switch provider {
477+
case types.ProviderGemini:
478+
pterm.Error.Println("Gemini requires an API key. Run: commit llm setup or set GEMINI_API_KEY.")
479+
case types.ProviderOpenAI:
480+
pterm.Error.Println("OpenAI requires an API key. Run: commit llm setup or set OPENAI_API_KEY.")
481+
case types.ProviderClaude:
482+
pterm.Error.Println("Claude requires an API key. Run: commit llm setup or set CLAUDE_API_KEY.")
483+
case types.ProviderGroq:
484+
pterm.Error.Println("Groq requires an API key. Run: commit llm setup or set GROQ_API_KEY.")
485+
case types.ProviderGrok:
486+
pterm.Error.Println("Grok requires an API key. Run: commit llm setup or set GROK_API_KEY.")
487+
case types.ProviderOllama:
488+
pterm.Error.Println("Ollama requires a reachable service URL. Run: commit llm setup or set OLLAMA_URL.")
489+
default:
490+
pterm.Error.Printf("%s is missing credentials. Run: commit llm setup.\n", provider)
491+
}
492+
}
493+
494+
// displayDryRunInfo shows what would be sent to the LLM without making an API call.
// It renders, in order: a banner, the provider configuration table (with the API
// key masked via maskAPIKey), the full prompt built by types.BuildCommitPrompt,
// and a summary of change size including a rough token estimate.
//
// Parameters:
//   - provider: the configured LLM provider (determines which config rows are shown)
//   - config:   used only for the Grok API endpoint
//   - changes:  the raw diff text that would be sent
//   - apiKey:   credential or, for Ollama, the service URL (never an issue to
//     display unmasked for URLs — see maskAPIKey)
func displayDryRunInfo(provider types.LLMProvider, config *types.Config, changes string, apiKey string) {
	pterm.DefaultHeader.WithFullWidth().
		WithBackgroundStyle(pterm.NewStyle(pterm.BgBlue)).
		WithTextStyle(pterm.NewStyle(pterm.FgWhite, pterm.Bold)).
		Println("DRY RUN MODE - Preview Only")

	pterm.Println()
	pterm.Info.Println("This is a dry-run. No API call will be made to the LLM provider.")
	pterm.Println()

	// Display provider information
	pterm.DefaultSection.Println("LLM Provider Configuration")
	providerInfo := [][]string{
		{"Provider", provider.String()},
	}

	// Add provider-specific info. Ollama shows URL+model instead of a key;
	// Grok additionally shows its configured API endpoint.
	switch provider {
	case types.ProviderOllama:
		url, model := resolveOllamaConfig(apiKey)
		providerInfo = append(providerInfo, []string{"Ollama URL", url})
		providerInfo = append(providerInfo, []string{"Model", model})
	case types.ProviderGrok:
		providerInfo = append(providerInfo, []string{"API Endpoint", config.GrokAPI})
		providerInfo = append(providerInfo, []string{"API Key", maskAPIKey(apiKey)})
	default:
		providerInfo = append(providerInfo, []string{"API Key", maskAPIKey(apiKey)})
	}

	// NOTE(review): Render() returns an error that is ignored here and below —
	// acceptable for display-only output, but worth confirming project policy.
	pterm.DefaultTable.WithHasHeader(false).WithData(providerInfo).Render()

	pterm.Println()

	// Build and display the prompt exactly as the real generation path would
	// (first attempt, no style overrides).
	opts := &types.GenerationOptions{Attempt: 1}
	prompt := types.BuildCommitPrompt(changes, opts)

	pterm.DefaultSection.Println("Prompt That Would Be Sent")
	pterm.Println()

	// Display prompt in a box
	promptBox := pterm.DefaultBox.
		WithTitle("Full LLM Prompt").
		WithTitleTopCenter().
		WithBoxStyle(pterm.NewStyle(pterm.FgCyan))
	promptBox.Println(prompt)

	pterm.Println()

	// Display changes statistics (line count is bytes-of-diff based, not files)
	pterm.DefaultSection.Println("Changes Summary")
	linesCount := len(strings.Split(changes, "\n"))
	charsCount := len(changes)

	statsData := [][]string{
		{"Total Lines", fmt.Sprintf("%d", linesCount)},
		{"Total Characters", fmt.Sprintf("%d", charsCount)},
		{"Prompt Size (approx)", fmt.Sprintf("%d tokens", estimateTokens(prompt))},
	}
	pterm.DefaultTable.WithHasHeader(false).WithData(statsData).Render()

	pterm.Println()
	pterm.Success.Println("Dry-run complete. To generate actual commit message, run without --dry-run flag.")
}
559+
560+
// maskAPIKey masks the API key for display purposes.
// Empty input yields "[NOT SET]"; URLs (the Ollama case) are shown verbatim;
// keys of 8 characters or fewer are fully starred; longer keys keep their
// first and last four characters visible.
func maskAPIKey(apiKey string) string {
	n := len(apiKey)
	switch {
	case n == 0:
		return "[NOT SET]"
	case strings.HasPrefix(apiKey, "http://"), strings.HasPrefix(apiKey, "https://"):
		// Don't mask URLs (used by Ollama)
		return apiKey
	case n <= 8:
		return strings.Repeat("*", n)
	}
	// Show first 4 and last 4 characters
	var masked strings.Builder
	masked.WriteString(apiKey[:4])
	masked.WriteString(strings.Repeat("*", n-8))
	masked.WriteString(apiKey[n-4:])
	return masked.String()
}
575+
576+
// estimateTokens provides a rough estimate of token count (1 token ≈ 4 characters).
// The value is a display-only approximation; it truncates, so short non-empty
// strings may report zero tokens.
func estimateTokens(text string) int {
	const approxBytesPerToken = 4
	return len(text) / approxBytesPerToken
}

0 commit comments

Comments
 (0)