-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathcli.js
More file actions
executable file
·385 lines (330 loc) · 15.9 KB
/
cli.js
File metadata and controls
executable file
·385 lines (330 loc) · 15.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
#!/usr/bin/env node
'use strict';
/**
* science-agent — Detect AI-confabulated academic citations
*
* Usage:
* science-agent audit <dir> --bibtex=<path> Audit citations against BibTeX
* science-agent arxiv [count] [--cat=cs.AI] Audit recent arXiv papers
* science-agent verify <doi> Verify a DOI against CrossRef
* science-agent search "title query" Search CrossRef by title
*
* Examples:
* science-agent audit ./docs/specs --bibtex=./docs/arxiv-paper/references.bib
* science-agent verify 10.1167/jov.25.3.15
* science-agent search "Chromatic sensitivity across the visual field"
*/
const path = require('path');
const fs = require('fs');
// CLI arguments after `node cli.js`; the first token is the subcommand name.
const args = process.argv.slice(2);
const command = args[0];
/**
 * Print the top-level usage/help text and terminate the process.
 * Invoked when no subcommand is given or the subcommand is unknown.
 * Always exits with code 1 (treats "no command" as an error).
 */
function usage() {
  console.log(`
science-agent — Detect AI-confabulated academic citations
Usage:
science-agent audit <dir> --bibtex=<path> Audit citations against BibTeX
science-agent arxiv [count] [--cat=cs.AI] Audit recent arXiv papers
science-agent verify <doi> Verify a DOI against CrossRef
science-agent search "title query" Search CrossRef by title
science-agent notebook-audit <dir> Audit [NB##:K##] claim references
--aggregate=<path> Path to notebook-key-claims.md
--notebooks=<dir> Path to notebooks directory
--cross-repo=<dir> Scan downstream repo for stale values
science-agent aggregate <notebooks-dir> Generate key-claims aggregate
-o <path> Output file (default: stdout)
Options:
--json Output as JSON
--verbose Show all citations, not just issues
`);
  process.exit(1);
}
/**
 * Entry point: parse CLI flags/positionals and dispatch to the selected command.
 *
 * Argument grammar (everything after the subcommand):
 *   --key=value   string-valued flag; the value may itself contain '='
 *   --key         boolean flag (stored as true)
 *   -o <path>     output path (used by `aggregate`)
 *   anything else positional argument
 *
 * Exit codes: 0 on success, 1 when a command finds errors or input is invalid.
 * `--json` prints the raw result object and skips pretty-printing/exit-code logic.
 *
 * @returns {Promise<void>}
 */
async function main() {
  if (!command) usage();
  const flags = {};
  const positional = [];
  const slicedArgs = args.slice(1);
  for (let i = 0; i < slicedArgs.length; i++) {
    const arg = slicedArgs[i];
    if (arg.startsWith('--')) {
      // Split on the FIRST '=' only, so values containing '=' survive intact
      // (the old `split('=')` truncated e.g. --bibtex=refs=v2.bib to "refs").
      const eq = arg.indexOf('=');
      if (eq === -1) {
        flags[arg.slice(2)] = true;
      } else {
        // `|| true` preserves legacy behavior: a bare `--key=` acts as a boolean flag.
        flags[arg.slice(2, eq)] = arg.slice(eq + 1) || true;
      }
    } else if (arg === '-o' && i + 1 < slicedArgs.length) {
      flags.o = slicedArgs[++i]; // consume the following token as the value
    } else {
      positional.push(arg);
    }
  }
  if (command === 'audit') {
    // Audit prose citations in <dir> against a BibTeX bibliography.
    const dir = positional[0] || '.';
    const bibtex = flags.bibtex;
    if (!bibtex) {
      console.error('Error: --bibtex=<path> is required for audit');
      process.exit(1);
    }
    if (!fs.existsSync(bibtex)) {
      console.error(`Error: BibTeX file not found: ${bibtex}`);
      process.exit(1);
    }
    const { auditDirectory } = require('./src/audit');
    const result = auditDirectory(path.resolve(dir), path.resolve(bibtex));
    if (flags.json) {
      console.log(JSON.stringify(result, null, 2));
      return;
    }
    // Pretty print
    console.log(`\n═══ Science Agent Audit ═══\n`);
    console.log(` Directory: ${path.resolve(dir)}`);
    console.log(` BibTeX: ${path.resolve(bibtex)}`);
    console.log(` Citations: ${result.stats.total}`);
    console.log(` In BibTeX: ${result.stats.inBibTeX}`);
    console.log(` Orphans: ${result.stats.orphans}`);
    console.log(` With DOI: ${result.stats.withDOI}`);
    console.log(` Ambiguous: ${result.stats.ambiguous}`);
    console.log(` Issues: ${result.stats.issueCount}\n`);
    if (result.issues.length > 0) {
      console.log(`── Issues ──\n`);
      for (const issue of result.issues) {
        const icon = issue.severity === 'warn' ? '⚠' : issue.severity === 'error' ? '✗' : 'ℹ';
        console.log(` ${icon} [${issue.type}] ${issue.citation}`);
        console.log(` ${issue.file}`);
        console.log(` ${issue.message}\n`);
      }
    }
    if (flags.verbose) {
      console.log(`── All Citations ──\n`);
      for (const c of result.citations) {
        // ✓ in BibTeX with DOI, ~ in BibTeX without DOI, ? not found in BibTeX
        const status = c.inBibTeX ? (c.hasDOI ? '✓' : '~') : '?';
        console.log(` ${status} ${c.raw} (${c.file})`);
      }
      console.log('');
    }
    // Exit code: non-zero only for error-severity issues (warnings don't fail CI).
    const errors = result.issues.filter(i => i.severity === 'error').length;
    if (errors > 0) process.exit(1);
  } else if (command === 'arxiv') {
    // Audit the reference lists of the N most recent papers in an arXiv category.
    const count = parseInt(positional[0], 10) || 10; // explicit radix; NaN falls back to 10
    const category = flags.cat || 'cs.AI';
    const { auditArxiv } = require('./src/arxiv');
    console.log(`\n═══ Science Agent: arXiv Audit ═══`);
    console.log(`Checking references in the ${count} most recent ${category} papers\n`);
    const result = await auditArxiv(count, { category });
    if (flags.json) {
      console.log(JSON.stringify(result, null, 2));
      return;
    }
    for (const p of result.papers) {
      // Header lines are identical for skipped and audited papers.
      console.log(`\n── ${p.id}: ${p.title.slice(0, 70)}...`);
      console.log(` ${p.authors.slice(0, 3).join(', ')}${p.authors.length > 3 ? ' et al.' : ''}`);
      if (p.skipped) {
        console.log(` (${p.skipped})`);
      } else {
        console.log(` ${p.refs} references | Verified: ${p.verified} | Issues: ${p.issues}${p.skippedArxivDOIs > 0 ? ` (${p.skippedArxivDOIs} arXiv DOIs skipped)` : ''}`);
      }
    }
    console.log(`\n\n═══ Summary ═══`);
    console.log(`Papers audited: ${result.stats.papersAudited}`);
    console.log(`Total references: ${result.stats.totalRefs}`);
    console.log(`References checked: ${result.stats.refsChecked}`);
    console.log(`Issues found: ${result.stats.issuesFound}`);
    console.log(`Issue rate: ${(result.stats.issueRate * 100).toFixed(1)}%`);
    if (result.issues.length > 0) {
      console.log(`\n── Issues ──\n`);
      for (const i of result.issues) {
        console.log(` ✗ [${i.issue}] ${i.paper}`);
        console.log(` ${i.ref}`);
        if (i.claimed) console.log(` claimed: ${i.claimed}`);
        if (i.actual) console.log(` actual: ${i.actual}`);
        if (i.bestMatch) console.log(` best match: ${i.bestMatch}`);
        if (i.doi) console.log(` DOI: ${i.doi}`);
        if (i.similarity !== undefined) console.log(` similarity: ${i.similarity}`);
        console.log('');
      }
    } else {
      console.log(`\n ✓ No citation issues detected.`);
    }
    if (result.stats.issuesFound > 0) process.exit(1);
  } else if (command === 'verify') {
    // Verify a single DOI against the CrossRef API.
    const doi = positional[0];
    if (!doi) {
      console.error('Error: DOI required. Usage: science-agent verify 10.1167/jov.25.3.15');
      process.exit(1);
    }
    const { verifyDOI } = require('./src/crossref');
    console.log(`Verifying DOI: ${doi}...`);
    const result = await verifyDOI(doi);
    if (flags.json) {
      console.log(JSON.stringify(result, null, 2));
      return;
    }
    if (result.verified) {
      console.log(`\n ✓ Verified`);
      console.log(` Title: ${result.title}`);
      console.log(` Authors: ${result.authors.join('; ')}`);
      console.log(` Journal: ${result.journal}`);
      console.log(` Year: ${result.year}\n`);
    } else {
      console.log(`\n ✗ Not verified: ${result.error}\n`);
      process.exit(1);
    }
  } else if (command === 'search') {
    // Free-text title search against CrossRef; all positionals form the query.
    const query = positional.join(' ');
    if (!query) {
      console.error('Error: search query required');
      process.exit(1);
    }
    const { searchByTitle } = require('./src/crossref');
    console.log(`Searching CrossRef: "${query}"...\n`);
    const results = await searchByTitle(query);
    if (flags.json) {
      console.log(JSON.stringify(results, null, 2));
      return;
    }
    if (results.length === 0) {
      console.log(' No results found.\n');
    } else {
      for (const r of results) {
        console.log(` ${r.doi}`);
        console.log(` ${r.title}`);
        console.log(` ${r.authors.slice(0, 3).join('; ')}${r.authors.length > 3 ? ' et al.' : ''}`);
        console.log(` ${r.year}\n`);
      }
    }
  } else if (command === 'aggregate') {
    // Compile "## Key Claims" sections from .ipynb files into one markdown file.
    const dir = positional[0];
    if (!dir) {
      // No directory given: show command-specific help and exit cleanly.
      console.log(`
science-agent aggregate — Generate a Key Claims summary from Jupyter notebooks
Usage: science-agent aggregate <notebooks-dir> [-o <output.md>]
This scans .ipynb files for "## Key Claims" sections and compiles them
into a single reference file. Other commands (notebook-audit) use this
aggregate to verify claim references in prose.
Example:
science-agent aggregate ./notebooks/ -o docs/key-claims.md
`);
      process.exit(0);
    }
    if (!fs.existsSync(dir)) {
      console.error(`Error: directory not found: ${dir}`);
      process.exit(1);
    }
    const { aggregate, formatMarkdown } = require('./src/aggregate');
    const result = aggregate(path.resolve(dir));
    // Graceful: if no notebooks with claims found, explain instead of erroring
    if (result.stats.notebooksWithClaims === 0) {
      console.log(`\n═══ Science Agent: Aggregate ═══\n`);
      console.log(` Scanned ${result.stats.notebooksScanned} notebook(s) in ${path.resolve(dir)}`);
      console.log(` No "## Key Claims" sections found.\n`);
      console.log(` To use this feature, add a Key Claims block to your notebooks:`);
      console.log(` ## Key Claims`);
      console.log(` - **K1**: Finding description (p < .05, d = 0.8)`);
      console.log(` - **K2**: Another finding\n`);
      console.log(` See: https://github.com/andyed/science-agent/blob/main/docs/notebook-conventions.md\n`);
      process.exit(0);
    }
    if (flags.json) {
      console.log(JSON.stringify(result, null, 2));
      return;
    }
    const md = formatMarkdown(result);
    if (flags.o) {
      // With -o: write the markdown file and print a summary to the console.
      fs.writeFileSync(flags.o, md);
      console.log(`\n═══ Science Agent: Aggregate ═══\n`);
      console.log(` Notebooks scanned: ${result.stats.notebooksScanned}`);
      console.log(` With Key Claims: ${result.stats.notebooksWithClaims}`);
      console.log(` Without: ${result.stats.notebooksWithout}`);
      console.log(` Total claims: ${result.stats.totalClaims}`);
      console.log(`\n Written to: ${flags.o}\n`);
    } else {
      // Without -o: emit raw markdown on stdout so it can be piped.
      process.stdout.write(md);
    }
  } else if (command === 'notebook-audit') {
    // Verify [NB##:K##] claim references in prose against notebook Key Claims.
    const dir = positional[0] || '.';
    // Graceful degradation: check if the directory has any [NB##:K##] references
    const targetDir = path.resolve(dir);
    if (!fs.existsSync(targetDir)) {
      console.log(`\n ℹ Directory not found: ${targetDir}`);
      console.log(` This command audits [NB##:K##] claim references in prose files.`);
      console.log(` See: science-agent aggregate --help for setting up Key Claims.\n`);
      process.exit(0);
    }
    const { auditNotebookClaims, auditCrossRepo } = require('./src/notebook-audit');
    const result = auditNotebookClaims(targetDir, {
      aggregatePath: flags.aggregate ? path.resolve(flags.aggregate) : null,
      notebookDir: flags.notebooks ? path.resolve(flags.notebooks) : null,
    });
    // If no claim references found at all, explain what this command is for
    if (result.stats.totalRefs === 0 && result.issues.length === 0) {
      console.log(`\n═══ Science Agent: Notebook Claims Audit ═══\n`);
      console.log(` Directory: ${targetDir}`);
      console.log(` No [NB##:K##] claim references found in this directory.\n`);
      console.log(` This command verifies notebook-style claim references.`);
      console.log(` To get started with Key Claims:`);
      console.log(` 1. Add a "## Key Claims" section to your notebooks`);
      console.log(` 2. Reference claims in prose as [NB01:K1], [NB01:K2], etc.`);
      console.log(` 3. Run: science-agent aggregate ./notebooks/ -o key-claims.md`);
      console.log(` 4. Then: science-agent notebook-audit ./docs --aggregate=key-claims.md\n`);
      console.log(` See: https://github.com/andyed/science-agent/blob/main/docs/notebook-conventions.md\n`);
      process.exit(0);
    }
    if (flags.json) {
      console.log(JSON.stringify(result, null, 2));
      return;
    }
    console.log(`\n═══ Science Agent: Notebook Claims Audit ═══\n`);
    console.log(` Directory: ${path.resolve(dir)}`);
    console.log(` Claim references: ${result.stats.totalRefs}`);
    console.log(` Unique notebooks: ${result.stats.uniqueNotebooks}`);
    console.log(` Unique claims: ${result.stats.uniqueClaims}`);
    console.log(` Issues: ${result.stats.issueCount}`);
    if (result.notebookStatus.length > 0) {
      console.log(`\n── Notebook Key Claims Status ──\n`);
      for (const nb of result.notebookStatus) {
        const icon = nb.hasBlock ? '✓' : '✗';
        const detail = nb.hasBlock
          ? `${nb.claimCount} claims${nb.verifiedDate ? `, verified ${nb.verifiedDate}` : ''}`
          : 'no Key Claims block';
        console.log(` ${icon} ${nb.file} (${detail})`);
      }
    }
    if (result.issues.length > 0) {
      console.log(`\n── Issues ──\n`);
      for (const issue of result.issues) {
        const icon = issue.severity === 'error' ? '✗' : '⚠';
        const loc = issue.line ? `${issue.file}:${issue.line}` : issue.file;
        console.log(` ${icon} [${issue.type}] ${issue.ref || ''}`);
        console.log(` ${loc}`);
        console.log(` ${issue.message}\n`);
      }
    } else {
      console.log(`\n ✓ All claim references resolve correctly.\n`);
    }
    // Cross-repo scan
    if (flags['cross-repo']) {
      // NOTE(review): a bare `--cross-repo` (no '=value') stores `true`, which
      // path.resolve() would reject — presumably always used with a path; verify.
      const crossDir = path.resolve(flags['cross-repo']);
      console.log(`\n── Cross-repo scan: ${crossDir} ──\n`);
      // Default stale values from the coordinate-space audit
      const staleValues = [
        { value: '0.827', message: 'Pre-fix M3 AUC (corrected to 0.792)', correction: '0.792' },
        { value: '0.821', message: 'Pre-fix M4 AUC (corrected to 0.792)', correction: '0.792' },
        { value: '994', message: 'Pre-fix evaluated-rejected N (corrected to 344)', correction: '344' },
      ];
      const crossResult = auditCrossRepo(crossDir, staleValues);
      if (crossResult.issues.length > 0) {
        for (const issue of crossResult.issues) {
          console.log(` ⚠ ${issue.file}:${issue.line} — ${issue.message}`);
          if (issue.correction) console.log(` → should be: ${issue.correction}`);
        }
      } else {
        console.log(` ✓ No stale values detected.\n`);
      }
    }
    // Only error-severity issues affect the exit code; cross-repo findings don't.
    const errors = result.issues.filter(i => i.severity === 'error').length;
    if (errors > 0) process.exit(1);
  } else {
    console.error(`Unknown command: ${command}`);
    usage();
  }
}
main().catch(err => { console.error(err); process.exit(1); });