-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcleanup_twitter_archive.py
More file actions
executable file
·351 lines (287 loc) · 11.9 KB
/
cleanup_twitter_archive.py
File metadata and controls
executable file
·351 lines (287 loc) · 11.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
#!/usr/bin/env python3
"""
Twitter Archive Cleanup Script
Extracts public tweets and relevant data from a Twitter/X archive export,
converting from window.YTD.* JavaScript format to clean JSON files.
Removes:
- Direct messages and DM media
- Ad tracking data
- Grok AI chat data
- Deleted tweets
- IP audit logs and other metadata
Keeps:
- Public tweets with media
- Likes
- Followers/following lists
- Profile information
- Community notes
"""
import os
import re
import json
import shutil
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Any, Optional
class TwitterArchiveCleanup:
    """Clean up Twitter archive by extracting public content to JSON.

    Reads the ``data/`` directory of an unpacked Twitter/X archive export,
    converts the ``window.YTD.*`` JavaScript data files into plain JSON,
    copies the public media directories, and writes both a machine-readable
    report and a human-readable summary into the output directory.
    """

    # Source .js data files to extract, mapped to their clean JSON names.
    FILES_TO_KEEP = {
        'tweets.js': 'tweets.json',
        'like.js': 'likes.json',
        'follower.js': 'followers.json',
        'following.js': 'following.json',
        'profile.js': 'profile.json',
        'community-tweet.js': 'community-tweets.json',
        'note-tweet.js': 'note-tweets.json',
        'tweet-headers.js': 'tweet-headers.json',
        'account.js': 'account.json',
    }

    # Media directories (under data/) copied verbatim into the clean archive.
    MEDIA_DIRS_TO_KEEP = [
        'tweets_media',
        'profile_media',
        'community_tweet_media',
    ]

    # Files/dirs deliberately excluded. Extraction is allowlist-based via
    # FILES_TO_KEEP / MEDIA_DIRS_TO_KEEP, so this list exists only for
    # documentation/clarity — nothing here is ever copied anyway.
    EXCLUDE_PATTERNS = [
        'direct-message*',
        'direct_message*',
        'ad-*.js',
        'deleted-*.js',
        'grok-*.js',
        'grok_*',
        'ip-audit.js',
        'device-token.js',
        'personalization.js',
    ]

    def __init__(self, archive_path: str, output_path: str = 'twitter_archive_clean'):
        """Initialize cleanup with archive and output paths.

        Args:
            archive_path: Root of the unpacked archive (must contain data/).
            output_path: Directory where the cleaned archive is written.
        """
        self.archive_path = Path(archive_path)
        self.output_path = Path(output_path)
        self.data_path = self.archive_path / 'data'
        self.output_data_path = self.output_path / 'data'
        self.output_media_path = self.output_path / 'media'
        # Running totals, reported by generate_summary_report() and run().
        self.stats = {
            'files_processed': 0,
            'files_skipped': 0,
            'total_tweets': 0,
            'total_likes': 0,
            'space_saved_gb': 0,
            'errors': []
        }

    def parse_ytd_js_file(self, file_path: Path) -> Optional[List[Dict[str, Any]]]:
        """
        Parse a Twitter YTD JavaScript file.

        These files have format: window.YTD.tweets.part0 = [{...}]
        (optionally followed by a trailing semicolon). We extract and
        decode the JSON array after the '=' sign.

        Returns:
            The decoded list, or None on any parse error. Errors are also
            appended to self.stats['errors'].
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            # Find the JSON array part after the = sign.
            # Pattern: window.YTD.*.part* = [...] with optional ';' at EOF
            # (';?' generalizes the original pattern: some exports end the
            # assignment with a semicolon, which '\s*$' alone rejected).
            match = re.search(r'window\.YTD\.[^=]+=\s*(\[.*\]);?\s*$', content, re.DOTALL)
            if not match:
                # Fallback: any assignment whose right-hand side is an array.
                match = re.search(r'=\s*(\[.*\]);?\s*$', content, re.DOTALL)
            if match:
                json_str = match.group(1)
                data = json.loads(json_str)
                return data
            else:
                print(f" ⚠️ Could not parse JS format in {file_path.name}")
                return None
        except json.JSONDecodeError as e:
            error_msg = f"JSON decode error in {file_path.name}: {e}"
            print(f" ❌ {error_msg}")
            self.stats['errors'].append(error_msg)
            return None
        except Exception as e:
            error_msg = f"Error parsing {file_path.name}: {e}"
            print(f" ❌ {error_msg}")
            self.stats['errors'].append(error_msg)
            return None

    def process_data_files(self):
        """Convert each allowlisted .js data file to a clean JSON file."""
        print("\n📄 Processing data files...")
        for js_filename, json_filename in self.FILES_TO_KEEP.items():
            js_path = self.data_path / js_filename
            if not js_path.exists():
                print(f" ⊘ {js_filename} - not found, skipping")
                self.stats['files_skipped'] += 1
                continue
            print(f" → {js_filename} → {json_filename}")
            data = self.parse_ytd_js_file(js_path)
            if data is not None:
                # Write clean JSON (UTF-8, human-readable indentation).
                output_file = self.output_data_path / json_filename
                with open(output_file, 'w', encoding='utf-8') as f:
                    json.dump(data, f, indent=2, ensure_ascii=False)
                # Track headline statistics for the summary report.
                if json_filename == 'tweets.json':
                    self.stats['total_tweets'] = len(data)
                elif json_filename == 'likes.json':
                    self.stats['total_likes'] = len(data)
                file_size_mb = output_file.stat().st_size / (1024 * 1024)
                print(f" ✓ Wrote {len(data)} items ({file_size_mb:.1f} MB)")
                self.stats['files_processed'] += 1
            else:
                self.stats['files_skipped'] += 1

    def copy_media_directories(self):
        """Copy allowlisted media directories into the clean archive."""
        print("\n🖼️ Copying media directories...")
        for media_dir in self.MEDIA_DIRS_TO_KEEP:
            source_dir = self.data_path / media_dir
            if not source_dir.exists():
                print(f" ⊘ {media_dir} - not found, skipping")
                continue
            dest_dir = self.output_media_path / media_dir
            print(f" → {media_dir}/")
            # Single tree walk: collect regular files only, so the reported
            # count matches the size sum (the original counted directories
            # too, and walked the tree twice).
            files = [f for f in source_dir.rglob('*') if f.is_file()]
            total_size = sum(f.stat().st_size for f in files)
            size_mb = total_size / (1024 * 1024)
            file_count = len(files)
            # Copy directory (tolerate pre-existing destination).
            shutil.copytree(source_dir, dest_dir, dirs_exist_ok=True)
            print(f" ✓ Copied {file_count} files ({size_mb:.1f} MB)")

    def calculate_space_saved(self):
        """Calculate how much space was saved by excluding files."""
        print("\n💾 Calculating space saved...")
        # Size of excluded media directories (DMs, Grok, deleted tweets).
        excluded_dirs = [
            'direct_messages_media',
            'direct_messages_group_media',
            'grok_chat_media',
            'deleted_tweets_media',
        ]
        excluded_size = 0
        for dir_name in excluded_dirs:
            dir_path = self.data_path / dir_name
            if dir_path.exists():
                size = sum(f.stat().st_size for f in dir_path.rglob('*') if f.is_file())
                size_gb = size / (1024 ** 3)
                print(f" ⊗ Excluded {dir_name}: {size_gb:.2f} GB")
                excluded_size += size
        # Size of excluded data files.
        excluded_files = [
            'direct-messages*.js',
            'ad-*.js',
            'deleted-*.js',
            'grok-*.js',
        ]
        for pattern in excluded_files:
            for file_path in self.data_path.glob(pattern):
                excluded_size += file_path.stat().st_size
        self.stats['space_saved_gb'] = excluded_size / (1024 ** 3)
        print(f" 💰 Total space saved: {self.stats['space_saved_gb']:.2f} GB")

    def generate_summary_report(self):
        """Write cleanup_report.json and a human-readable README.txt."""
        print("\n📊 Generating summary report...")
        report = {
            'cleanup_date': datetime.now().isoformat(),
            'original_archive': str(self.archive_path),
            'cleaned_archive': str(self.output_path),
            'statistics': {
                'files_processed': self.stats['files_processed'],
                'files_skipped': self.stats['files_skipped'],
                'total_tweets': self.stats['total_tweets'],
                'total_likes': self.stats['total_likes'],
                'space_saved_gb': round(self.stats['space_saved_gb'], 2),
            },
            'files_kept': list(self.FILES_TO_KEEP.values()),
            'media_directories_kept': self.MEDIA_DIRS_TO_KEEP,
            'errors': self.stats['errors'],
        }
        # Machine-readable report.
        report_path = self.output_path / 'cleanup_report.json'
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)
        print(f" ✓ Report saved to: {report_path}")
        # Human-readable summary.
        summary_path = self.output_path / 'README.txt'
        with open(summary_path, 'w', encoding='utf-8') as f:
            f.write("TWITTER ARCHIVE CLEANUP SUMMARY\n")
            f.write("=" * 50 + "\n\n")
            f.write(f"Cleanup Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"Original Archive: {self.archive_path}\n\n")
            f.write("STATISTICS\n")
            f.write("-" * 50 + "\n")
            f.write(f"Files Processed: {self.stats['files_processed']}\n")
            f.write(f"Total Tweets: {self.stats['total_tweets']}\n")
            f.write(f"Total Likes: {self.stats['total_likes']}\n")
            f.write(f"Space Saved: {self.stats['space_saved_gb']:.2f} GB\n\n")
            f.write("CONTENTS\n")
            f.write("-" * 50 + "\n")
            f.write("data/\n")
            for json_file in self.FILES_TO_KEEP.values():
                f.write(f" - {json_file}\n")
            f.write("\nmedia/\n")
            for media_dir in self.MEDIA_DIRS_TO_KEEP:
                f.write(f" - {media_dir}/\n")
            if self.stats['errors']:
                f.write("\nERRORS\n")
                f.write("-" * 50 + "\n")
                for error in self.stats['errors']:
                    f.write(f" - {error}\n")
        print(f" ✓ Summary saved to: {summary_path}")

    def run(self) -> bool:
        """Execute the cleanup process.

        Returns:
            True on success, False if the archive layout is invalid.
        """
        print("=" * 60)
        print("🧹 TWITTER ARCHIVE CLEANUP")
        print("=" * 60)
        print(f"\nSource: {self.archive_path}")
        print(f"Output: {self.output_path}")
        # Verify archive exists before touching the output directory.
        if not self.archive_path.exists():
            print(f"\n❌ Error: Archive path does not exist: {self.archive_path}")
            return False
        if not self.data_path.exists():
            print("\n❌ Error: data/ directory not found in archive")
            return False
        # Create output directories.
        print("\n📁 Creating output directories...")
        self.output_data_path.mkdir(parents=True, exist_ok=True)
        self.output_media_path.mkdir(parents=True, exist_ok=True)
        print(" ✓ Created output structure")
        # Pipeline: convert data, copy media, measure savings, report.
        self.process_data_files()
        self.copy_media_directories()
        self.calculate_space_saved()
        self.generate_summary_report()
        # Final summary.
        print("\n" + "=" * 60)
        print("✅ CLEANUP COMPLETE!")
        print("=" * 60)
        print("\n📊 Summary:")
        print(f" • Processed {self.stats['files_processed']} files")
        print(f" • Extracted {self.stats['total_tweets']} tweets")
        print(f" • Extracted {self.stats['total_likes']} likes")
        print(f" • Saved {self.stats['space_saved_gb']:.2f} GB of space")
        print(f"\n📂 Clean archive location: {self.output_path.absolute()}")
        if self.stats['errors']:
            print(f"\n⚠️ {len(self.stats['errors'])} errors occurred (see cleanup_report.json)")
        return True
def main():
    """Command-line entry point.

    Usage: cleanup_twitter_archive.py [archive_path] [output_path]
    Defaults: archive_path='.', output_path='twitter_archive_clean'.
    Exits 0 on success, 1 on failure.
    """
    import sys

    # Optional positional arguments with fallbacks.
    args = sys.argv[1:]
    archive_path = args[0] if len(args) >= 1 else '.'
    output_path = args[1] if len(args) >= 2 else 'twitter_archive_clean'

    cleanup = TwitterArchiveCleanup(archive_path, output_path)
    sys.exit(0 if cleanup.run() else 1)


if __name__ == '__main__':
    main()