-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtweet_helper.py
More file actions
executable file
·530 lines (420 loc) · 18.9 KB
/
tweet_helper.py
File metadata and controls
executable file
·530 lines (420 loc) · 18.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
#!/usr/bin/env python3
"""
Tweet Helper - RAG-powered tweet generator and analyzer
Interactive CLI tool for:
- Searching your successful tweets by topic
- Generating new tweets in your voice
- Analyzing drafts for voice match and predicted engagement
- Comparing tweet variations
- Interactive refinement
Usage:
python tweet_helper.py search "coffee"
python tweet_helper.py generate "weekend plans"
python tweet_helper.py analyze "my draft tweet"
python tweet_helper.py interactive
"""
import json
import os
from pathlib import Path
from typing import List, Dict, Optional
from dotenv import load_dotenv
from openai import OpenAI
import chromadb
import typer
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from rich.prompt import Prompt, Confirm
from rich import print as rprint
# Load environment variables (OPENAI_API_KEY, model settings, ...) from a local .env
load_dotenv()
app = typer.Typer(help="RAG-powered tweet helper")  # CLI entry point; commands registered below
console = Console()  # shared Rich console used by all commands for styled output
class TweetHelper:
    """RAG-powered tweet helper.

    Combines an OpenAI client (embeddings + generation) with a persistent
    ChromaDB collection of the user's past tweets to search, generate,
    analyze and compare tweets in the user's own voice.
    """

    def __init__(self):
        """Initialize clients, load the voice profile, connect to ChromaDB.

        Configuration comes from environment variables (loaded from .env):
        OPENAI_API_KEY, EMBEDDING_MODEL, DEFAULT_MODEL, DEFAULT_TEMPERATURE,
        MAX_TOKENS.

        Raises:
            Exception: re-raised from chromadb when the 'tweets' collection
                is missing (run setup_rag.py first).
        """
        self.client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
        self.embedding_model = os.getenv('EMBEDDING_MODEL', 'text-embedding-3-small')
        self.default_model = os.getenv('DEFAULT_MODEL', 'gpt-4o-2024-11-20')
        self.temperature = float(os.getenv('DEFAULT_TEMPERATURE', '0.7'))
        self.max_tokens = int(os.getenv('MAX_TOKENS', '100'))
        # Voice profile drives both prompt construction and draft scoring.
        self.voice_profile = self._load_voice_profile()
        # Connect to ChromaDB
        try:
            chroma_client = chromadb.PersistentClient(path="chroma_db")
            self.collection = chroma_client.get_collection("tweets")
        except Exception as e:
            # FIX: first message was an f-string with no placeholders.
            console.print("[red]Error: ChromaDB not found. Run setup_rag.py first![/red]")
            console.print(f"[dim]Error: {e}[/dim]")
            raise

    def _load_voice_profile(self) -> Dict:
        """Load the 'solid' voice profile from corpus/voice_profiles.json.

        Returns:
            The profile dict, or an empty dict when the analysis file is
            absent so callers can fall back to built-in defaults.
        """
        profile_path = Path('corpus/voice_profiles.json')
        if profile_path.exists():
            with open(profile_path) as f:
                profiles = json.load(f)
            return profiles.get('solid', {})
        return {}

    def _embed_query(self, text: str) -> List[float]:
        """Return the embedding vector for a single text query."""
        response = self.client.embeddings.create(
            input=[text],
            model=self.embedding_model
        )
        return response.data[0].embedding

    def search(self, query: str, n_results: int = 5, min_engagement: int = 10,
               has_media: Optional[bool] = None) -> List[Dict]:
        """Search for similar successful tweets.

        Args:
            query: Topic or keywords; embedded and matched by similarity.
            n_results: Maximum number of tweets to return.
            min_engagement: Minimum 'engagement' metadata value.
            has_media: When not None, additionally filter on the media flag.

        Returns:
            Up to n_results dicts with keys: text, engagement, tier,
            has_media, is_original.
        """
        query_embedding = self._embed_query(query)
        # Build the metadata filter. ChromaDB requires an explicit $and
        # operator when filtering on more than one field.
        # FIX: the original passed a multi-key dict (rejected by newer
        # Chroma versions), and its `where if where else None` fallback was
        # dead code because the dict always contained the engagement key.
        conditions: List[Dict] = [{"engagement": {"$gte": min_engagement}}]
        if has_media is not None:
            conditions.append({"has_media": has_media})
        where = conditions[0] if len(conditions) == 1 else {"$and": conditions}
        # Over-fetch (2x), then trim to n_results when formatting below.
        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=n_results * 2,
            where=where
        )
        tweets = []
        for doc, metadata in zip(results['documents'][0][:n_results],
                                 results['metadatas'][0][:n_results]):
            tweets.append({
                'text': doc,
                'engagement': metadata['engagement'],
                'tier': metadata['engagement_tier'],
                'has_media': metadata['has_media'],
                'is_original': metadata['is_original']
            })
        return tweets

    def generate(self, topic: str, model: Optional[str] = None,
                 temperature: Optional[float] = None, examples: Optional[List[Dict]] = None) -> str:
        """Generate a tweet on a topic using RAG examples.

        Args:
            topic: Subject of the tweet.
            model: Override for the default model name.
            temperature: Override for the default sampling temperature
                (ignored for reasoning models, which reject it).
            examples: Pre-fetched example tweets; fetched via search() when
                None. Note an empty list suppresses the fetch deliberately.

        Returns:
            The generated tweet text, stripped of surrounding whitespace.
        """
        # Get similar examples if not provided
        if examples is None:
            examples = self.search(topic, n_results=3)
        # Build structured prompt following GPT-5 best practices
        examples_text = "\n".join([f"- [{ex['engagement']} engagement] {ex['text']}"
                                   for ex in examples])
        voice_desc = self._get_voice_description()
        # Structured instructions with XML tags for clarity
        instructions = """You are a tweet writer that produces single, standalone tweets.
<output_format>
- Output ONLY the tweet text itself
- NO explanations, meta-commentary, or preambles
- NO quotation marks around the tweet
- Approximately 140 characters
</output_format>"""
        # Structured prompt with XML tags for better GPT-5 instruction following
        prompt = f"""<voice_profile>
{voice_desc}
</voice_profile>
<examples>
{examples_text}
</examples>
<task>
Topic: {topic}
Write ONE tweet matching this voice profile exactly.
</task>"""
        # Generate using Responses API (required for GPT-5)
        # Note: Reasoning models (gpt-5, o1-*) don't support temperature parameter
        selected_model = model or self.default_model
        is_reasoning_model = selected_model in ['gpt-5', 'o1-pro', 'o1', 'o1-mini']
        # Build params with core Responses API parameters
        # Note: Advanced parameters (verbosity, reasoning_effort) not yet in Python SDK
        # Using structured prompt instructions instead for GPT-5 best practices
        params = {
            'model': selected_model,
            'instructions': instructions,
            'input': prompt
        }
        # Only add temperature for non-reasoning models
        if not is_reasoning_model:
            params['temperature'] = temperature or self.temperature
        response = self.client.responses.create(**params)
        return response.output_text.strip()

    def _get_voice_description(self) -> str:
        """Render the voice profile as a prompt-ready text description."""
        if not self.voice_profile:
            return "Write in a casual, personal style (~62 characters)."
        style = self.voice_profile.get('style', {})
        ling = self.voice_profile.get('linguistic', {})
        sent = self.voice_profile.get('sentiment', {})
        desc = f"""VOICE PROFILE:
- Tone: {sent.get('overall_tone', 'Positive')} ({sent.get('distribution', {}).get('positive_pct', 0)}% positive)
- Style: {style.get('formality_desc', 'Casual')} ({style.get('formality_score', 5)}/10 casual)
- Personal: {style.get('personal_pct', 77)}% | Observational: {style.get('observational_pct', 23)}%
- Length: ~{ling.get('avg_char_count', 62)} characters
- Profanity: {ling.get('profanity_pct', 0)}% of tweets
- Humor markers: {ling.get('humor_markers_pct', 0)}%"""
        return desc

    def analyze(self, draft: str) -> Dict:
        """Analyze a draft tweet for voice match and predicted engagement.

        Returns:
            Dict with voice_match (0..1), predicted_engagement, length
            stats, is_personal, has_media_intent, and human-readable
            suggestions.
        """
        # Calculate voice match score
        voice_match = self._calculate_voice_match(draft)
        # Predict engagement based on features
        predicted_engagement = self._predict_engagement(draft)
        # Check length against the profile's average (default ~62 chars)
        char_count = len(draft)
        target_length = self.voice_profile.get('linguistic', {}).get('avg_char_count', 62)
        length_ok = abs(char_count - target_length) < 20
        # Analyze tone: crude first-person / media-link heuristics
        is_personal = any(word in draft.lower() for word in ['i ', 'me ', 'my ', "i'm", "i've"])
        has_media_intent = 'https://' in draft or 'pic.' in draft.lower()
        analysis = {
            'voice_match': voice_match,
            'predicted_engagement': predicted_engagement,
            'length': {
                'chars': char_count,
                'target': target_length,
                'ok': length_ok
            },
            'is_personal': is_personal,
            'has_media_intent': has_media_intent,
            'suggestions': []
        }
        # Generate suggestions
        if voice_match < 0.6:
            analysis['suggestions'].append("❌ Doesn't match your voice well")
        if not length_ok:
            if char_count < target_length - 20:
                analysis['suggestions'].append(f"📝 Too short (add ~{target_length - char_count} chars)")
            else:
                analysis['suggestions'].append(f"📝 Too long (cut ~{char_count - target_length} chars)")
        personal_pct = self.voice_profile.get('style', {}).get('personal_pct', 77)
        if personal_pct > 60 and not is_personal:
            analysis['suggestions'].append("👤 Try making it more personal (use 'I', 'my', etc.)")
        return analysis

    def _calculate_voice_match(self, draft: str) -> float:
        """Score (0..1) how well a draft matches the voice profile.

        Returns 0.5 (unknown) when no profile is loaded.
        """
        if not self.voice_profile:
            return 0.5
        score = 1.0
        # Check length (weight: 0.2)
        target_length = self.voice_profile.get('linguistic', {}).get('avg_char_count', 62)
        length_diff = abs(len(draft) - target_length)
        length_score = max(0, 1 - (length_diff / target_length))
        score *= (0.8 + 0.2 * length_score)
        # Check personal vs observational (weight: 0.3)
        is_personal = any(word in draft.lower() for word in ['i ', 'me ', 'my ', "i'm", "i've"])
        target_personal = self.voice_profile.get('style', {}).get('personal_pct', 77) / 100
        if is_personal and target_personal > 0.6:
            score *= 1.0
        elif not is_personal and target_personal <= 0.6:
            score *= 1.0
        else:
            score *= 0.7
        # Check formality (weight: 0.2)
        casual_markers = ['lol', 'haha', 'gonna', 'wanna', 'kinda']
        has_casual = any(marker in draft.lower() for marker in casual_markers)
        target_casual = self.voice_profile.get('style', {}).get('formality_score', 5) > 5
        if has_casual == target_casual:
            score *= 1.0
        else:
            score *= 0.8
        return min(1.0, score)

    def _predict_engagement(self, draft: str) -> Dict:
        """Predict an engagement range for a draft.

        Returns:
            Dict with 'predicted' (rounded point estimate) and 'range'
            ([lower, upper], ±30% around the estimate).
        """
        # Base prediction on voice match
        voice_match = self._calculate_voice_match(draft)
        # Get baseline from voice profile
        baseline = self.voice_profile.get('structural', {}).get('avg_engagement', 12)
        # Adjust based on features
        multiplier = voice_match
        # Media detection (rough)
        if 'https://' in draft or 'pic.' in draft.lower():
            multiplier *= 1.3  # Boost for media
        # Original vs reply
        if draft.startswith('@'):
            multiplier *= 0.5  # Penalty for reply format
        # Length penalty for extremes
        if len(draft) > 100 or len(draft) < 30:
            multiplier *= 0.8
        predicted = baseline * multiplier
        lower = max(0, predicted * 0.7)
        upper = predicted * 1.3
        return {
            'predicted': round(predicted),
            'range': [round(lower), round(upper)]
        }

    def compare(self, draft_a: str, draft_b: str) -> Dict:
        """Compare two drafts; the higher voice-match score wins."""
        analysis_a = self.analyze(draft_a)
        analysis_b = self.analyze(draft_b)
        return {
            'draft_a': {
                'text': draft_a,
                'analysis': analysis_a
            },
            'draft_b': {
                'text': draft_b,
                'analysis': analysis_b
            },
            'winner': 'A' if analysis_a['voice_match'] > analysis_b['voice_match'] else 'B',
            'reason': 'Better voice match' if analysis_a['voice_match'] != analysis_b['voice_match'] else 'Similar'
        }

    def save_favorite(self, tweet: str, metadata: Optional[Dict] = None):
        """Append a tweet to favorites.json with the current timestamp."""
        from datetime import datetime, timezone  # local import: only used here
        favorites_file = Path('favorites.json')
        if favorites_file.exists():
            with open(favorites_file) as f:
                favorites = json.load(f)
        else:
            favorites = []
        favorites.append({
            'tweet': tweet,
            # BUG FIX: the original stored the favorites file's ctime (or
            # None on first save) — the same stale value for every entry —
            # instead of the time this tweet was actually saved.
            'saved_at': datetime.now(timezone.utc).isoformat(),
            'metadata': metadata or {}
        })
        with open(favorites_file, 'w') as f:
            json.dump(favorites, f, indent=2)
        console.print(f"[green]✓ Saved to favorites ({len(favorites)} total)[/green]")
# CLI Commands
@app.command()
def search(
    query: str = typer.Argument(..., help="Search query (topic or keywords)"),
    n: int = typer.Option(5, "--limit", "-n", help="Number of results"),
    min_engagement: int = typer.Option(10, "--min-engagement", "-m", help="Minimum engagement"),
    media_only: bool = typer.Option(False, "--media", help="Only show tweets with media")
):
    """Search your successful tweets by topic."""
    helper = TweetHelper()
    console.print(f"\n🔍 Searching for: [cyan]'{query}'[/cyan]\n")
    # The --media flag maps to an explicit True filter; otherwise no filter.
    matches = helper.search(query, n_results=n, min_engagement=min_engagement,
                            has_media=True if media_only else None)
    if not matches:
        console.print("[yellow]No results found. Try a different query or lower --min-engagement[/yellow]")
        return
    table = Table(title="Similar Successful Tweets")
    table.add_column("Engagement", style="cyan", width=12)
    table.add_column("Tweet", style="white")
    table.add_column("Media", width=6)
    for item in matches:
        text = item['text']
        if len(text) > 80:
            text = text[:80] + "..."
        media_mark = "✓" if item['has_media'] else ""
        table.add_row(str(item['engagement']), text, media_mark)
    console.print(table)
@app.command()
def generate(
    topic: str = typer.Argument(..., help="Topic or theme for the tweet"),
    model: Optional[str] = typer.Option(None, "--model", "-m", help="Model to use (gpt-4o, gpt-4o-mini)"),
    save: bool = typer.Option(False, "--save", "-s", help="Save to favorites"),
    interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive refinement")
):
    """Generate a tweet on a topic using your successful style."""
    helper = TweetHelper()
    console.print(f"\n🎯 Generating tweet about: [cyan]'{topic}'[/cyan]\n")
    # Pull the closest high-performing tweets to seed the prompt.
    examples = helper.search(topic, n_results=3)
    if examples:
        console.print("[dim]Based on similar successful tweets:[/dim]")
        for ex in examples:
            console.print(f"[dim] [{ex['engagement']} engagement] {ex['text'][:60]}...[/dim]")
        console.print()
    # Generate, then score the result against the voice profile.
    with console.status("[bold green]Generating...", spinner="dots"):
        tweet = helper.generate(topic, model=model, examples=examples)
    report = helper.analyze(tweet)
    console.print(Panel(tweet, title="✨ Generated Tweet", border_style="green"))
    console.print(f"\n📊 Analysis:")
    console.print(f" Voice match: {report['voice_match']*100:.0f}%")
    lo, hi = report['predicted_engagement']['range']
    console.print(f" Predicted engagement: {lo}-{hi}")
    console.print(f" Length: {report['length']['chars']} chars (target: ~{report['length']['target']})")
    if report['suggestions']:
        console.print(f"\n💡 Suggestions:")
        for tip in report['suggestions']:
            console.print(f" {tip}")
    # Save when --save was given, or when confirmed in interactive mode.
    should_save = save
    if not should_save and interactive:
        should_save = Confirm.ask("\nSave to favorites?")
    if should_save:
        helper.save_favorite(tweet, {'topic': topic, 'analysis': report})
@app.command()
def analyze(
    draft: str = typer.Argument(..., help="Draft tweet to analyze")
):
    """Analyze a draft tweet for voice match and predicted engagement."""
    helper = TweetHelper()
    console.print(f"\n📊 Analyzing: [cyan]'{draft}'[/cyan]\n")
    report = helper.analyze(draft)
    match = report['voice_match']
    console.print(f"Voice Match: {match*100:.0f}%")
    # Bucket the score into strong / moderate / weak verdicts.
    if match > 0.8:
        verdict = "[green] ✓ Strong match to your voice[/green]"
    elif match > 0.6:
        verdict = "[yellow] ⚠ Moderate match[/yellow]"
    else:
        verdict = "[red] ✗ Weak match to your voice[/red]"
    console.print(verdict)
    lo, hi = report['predicted_engagement']['range']
    console.print(f"\nPredicted Engagement: {lo}-{hi}")
    console.print(f"Length: {report['length']['chars']} chars (target: ~{report['length']['target']})")
    console.print(f"Personal: {'Yes' if report['is_personal'] else 'No'}")
    if report['suggestions']:
        console.print(f"\n💡 Suggestions:")
        for tip in report['suggestions']:
            console.print(f" {tip}")
@app.command()
def compare(
    draft_a: str = typer.Argument(..., help="First draft"),
    draft_b: str = typer.Argument(..., help="Second draft")
):
    """Compare two draft tweets."""
    helper = TweetHelper()
    console.print("\n📊 Comparing drafts...\n")
    result = helper.compare(draft_a, draft_b)
    stats_a = result['draft_a']['analysis']
    stats_b = result['draft_b']['analysis']
    # Side-by-side metric table for the two drafts.
    table = Table(title="Draft Comparison")
    table.add_column("Metric", style="cyan")
    table.add_column("Draft A", style="yellow")
    table.add_column("Draft B", style="green")
    table.add_row("Voice Match",
                  f"{stats_a['voice_match']*100:.0f}%",
                  f"{stats_b['voice_match']*100:.0f}%")
    table.add_row("Predicted Engagement",
                  f"{stats_a['predicted_engagement']['predicted']}",
                  f"{stats_b['predicted_engagement']['predicted']}")
    table.add_row("Length",
                  f"{stats_a['length']['chars']} chars",
                  f"{stats_b['length']['chars']} chars")
    console.print(table)
    console.print(f"\n🏆 Winner: [bold]Draft {result['winner']}[/bold] ({result['reason']})")
@app.command()
def interactive():
    """Interactive tweet generation session.

    Loops prompting for a topic, generates a tweet, shows a quick
    voice/engagement summary, then offers save / regenerate / edit /
    next / quit actions.
    """
    helper = TweetHelper()
    console.print(Panel.fit(
        "[bold]Interactive Tweet Helper[/bold]\n"
        "Generate and refine tweets interactively",
        border_style="cyan"
    ))
    while True:
        console.print()
        topic = Prompt.ask("Topic (or 'quit' to exit)")
        if topic.lower() in ['quit', 'exit', 'q']:
            break
        regenerate = True
        while regenerate:
            regenerate = False
            # Generate a candidate for the current topic
            with console.status("[bold green]Generating...", spinner="dots"):
                tweet = helper.generate(topic)
            console.print(Panel(tweet, title="✨ Suggestion", border_style="green"))
            # Quick one-line analysis summary
            analysis = helper.analyze(tweet)
            console.print(f"[dim]Voice: {analysis['voice_match']*100:.0f}% | "
                          f"Predicted: {analysis['predicted_engagement']['range'][0]}-{analysis['predicted_engagement']['range'][1]}[/dim]")
            # Options
            action = Prompt.ask("\nAction", choices=["save", "regenerate", "edit", "next", "quit"], default="next")
            if action == "save":
                helper.save_favorite(tweet, {'topic': topic})
            elif action == "regenerate":
                # BUG FIX: the original fell through to the topic prompt,
                # so "regenerate" never regenerated for the same topic.
                regenerate = True
            elif action == "edit":
                edited = Prompt.ask("Edit tweet", default=tweet)
                helper.save_favorite(edited, {'topic': topic, 'edited': True})
            elif action == "quit":
                return  # exit the whole session, not just the inner loop
# Script entry point: hand control to the Typer CLI (search / generate / analyze / compare / interactive).
if __name__ == '__main__':
    app()