# nodes.py (1327 lines, 1121 loc, 61.8 KB)
import os
import re
import yaml
from pocketflow import Node, BatchNode
from utils.crawl_github_files import crawl_github_files
from utils.call_llm import call_llm
from utils.crawl_local_files import crawl_local_files
# Helper to get content for specific file indices
def get_content_for_indices(files_data, indices):
content_map = {}
for i in indices:
if 0 <= i < len(files_data):
path, content = files_data[i]
content_map[f"{i} # {path}"] = (
content # Use index + path as key for context
)
return content_map
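# Hedged usage sketch for the helper above (hypothetical data): keys combine
# index and path, and out-of-range indices are silently skipped.
#   >>> files = [("main.py", "print('hi')"), ("utils.py", "x = 1")]
#   >>> get_content_for_indices(files, [0, 99])
#   {'0 # main.py': "print('hi')"}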
class FetchRepo(Node):
def prep(self, shared):
repo_url = shared.get("repo_url")
local_dir = shared.get("local_dir")
project_name = shared.get("project_name")
if not project_name:
# Basic name derivation from URL or directory
if repo_url:
project_name = repo_url.split("/")[-1].replace(".git", "")
else:
project_name = os.path.basename(os.path.abspath(local_dir))
shared["project_name"] = project_name
# Get file patterns directly from shared
include_patterns = shared["include_patterns"]
exclude_patterns = shared["exclude_patterns"]
max_file_size = shared["max_file_size"]
return {
"repo_url": repo_url,
"local_dir": local_dir,
"token": shared.get("github_token"),
"include_patterns": include_patterns,
"exclude_patterns": exclude_patterns,
"max_file_size": max_file_size,
"use_relative_paths": True,
}
def exec(self, prep_res):
if prep_res["repo_url"]:
print(f"Crawling repository: {prep_res['repo_url']}...")
result = crawl_github_files(
repo_url=prep_res["repo_url"],
token=prep_res["token"],
include_patterns=prep_res["include_patterns"],
exclude_patterns=prep_res["exclude_patterns"],
max_file_size=prep_res["max_file_size"],
use_relative_paths=prep_res["use_relative_paths"],
)
else:
print(f"Crawling directory: {prep_res['local_dir']}...")
result = crawl_local_files(
directory=prep_res["local_dir"],
include_patterns=prep_res["include_patterns"],
exclude_patterns=prep_res["exclude_patterns"],
max_file_size=prep_res["max_file_size"],
use_relative_paths=prep_res["use_relative_paths"]
)
# Convert dict to list of tuples: [(path, content), ...]
files_list = list(result.get("files", {}).items())
        if not files_list:
            raise ValueError("Failed to fetch files")
print(f"Fetched {len(files_list)} files.")
return files_list
def post(self, shared, prep_res, exec_res):
shared["files"] = exec_res # List of (path, content) tuples
class IdentifyAbstractions(Node):
def prep(self, shared):
files_data = shared["files"]
project_name = shared["project_name"] # Get project name
language = shared.get("language", "english") # Get language
use_cache = shared.get("use_cache", True) # Get use_cache flag, default to True
max_abstraction_num = shared.get("max_abstraction_num", 30) # Get max_abstraction_num, default to 30
# Helper to create context from files, respecting limits (basic example)
def create_llm_context(files_data):
context = ""
file_info = [] # Store tuples of (index, path)
for i, (path, content) in enumerate(files_data):
entry = f"--- File Index {i}: {path} ---\n{content}\n\n"
context += entry
file_info.append((i, path))
return context, file_info # file_info is list of (index, path)
context, file_info = create_llm_context(files_data)
# Format file info for the prompt (comment is just a hint for LLM)
file_listing_for_prompt = "\n".join(
[f"- {idx} # {path}" for idx, path in file_info]
)
return (
context,
file_listing_for_prompt,
len(files_data),
project_name,
language,
use_cache,
max_abstraction_num,
) # Return all parameters
def exec(self, prep_res):
(
context,
file_listing_for_prompt,
file_count,
project_name,
language,
use_cache,
max_abstraction_num,
) = prep_res # Unpack all parameters
print(f"Identifying abstractions using LLM...")
# Add language instruction and hints only if not English
language_instruction = ""
name_lang_hint = ""
desc_lang_hint = ""
if language.lower() != "english":
language_instruction = f"IMPORTANT: Generate the `name` and `description` for each abstraction in **{language.capitalize()}** language. Do NOT use English for these fields.\n\n"
# Keep specific hints here as name/description are primary targets
name_lang_hint = f" (value in {language.capitalize()})"
desc_lang_hint = f" (value in {language.capitalize()})"
prompt = f"""
For the project `{project_name}`:
Codebase Context:
{context}
{language_instruction}Analyze the codebase context.
Identify the top 5-{max_abstraction_num} most important core abstractions to help those new to the codebase.
For each abstraction, provide:
1. A concise `name`{name_lang_hint}.
2. A beginner-friendly `description` explaining what it is with a simple analogy, in around 100 words{desc_lang_hint}.
3. A list of relevant `file_indices` (integers) using the format `idx # path/comment`.
List of file indices and paths present in the context:
{file_listing_for_prompt}
Format the output as a YAML list of dictionaries:
```yaml
- name: |
Query Processing{name_lang_hint}
description: |
Explains what the abstraction does.
It's like a central dispatcher routing requests.{desc_lang_hint}
file_indices:
- 0 # path/to/file1.py
- 3 # path/to/related.py
- name: |
Query Optimization{name_lang_hint}
description: |
Another core concept, similar to a blueprint for objects.{desc_lang_hint}
file_indices:
- 5 # path/to/another.js
# ... up to {max_abstraction_num} abstractions
```"""
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
# --- Validation ---
yaml_str = response.strip().split("```yaml")[1].split("```")[0].strip()
abstractions = yaml.safe_load(yaml_str)
if not isinstance(abstractions, list):
raise ValueError("LLM Output is not a list")
validated_abstractions = []
for item in abstractions:
if not isinstance(item, dict) or not all(
k in item for k in ["name", "description", "file_indices"]
):
raise ValueError(f"Missing keys in abstraction item: {item}")
if not isinstance(item["name"], str):
raise ValueError(f"Name is not a string in item: {item}")
if not isinstance(item["description"], str):
raise ValueError(f"Description is not a string in item: {item}")
if not isinstance(item["file_indices"], list):
raise ValueError(f"file_indices is not a list in item: {item}")
# Validate indices
validated_indices = []
            for idx_entry in item["file_indices"]:
                try:
                    if isinstance(idx_entry, int):
                        idx = idx_entry
                    elif isinstance(idx_entry, str) and "#" in idx_entry:
                        idx = int(idx_entry.split("#")[0].strip())
                    else:
                        idx = int(str(idx_entry).strip())
                except (ValueError, TypeError):
                    raise ValueError(
                        f"Could not parse index from entry: {idx_entry} in item {item['name']}"
                    )
                # Range-check outside the try so this error is not masked as a parse failure
                if not (0 <= idx < file_count):
                    raise ValueError(
                        f"Invalid file index {idx} found in item {item['name']}. Max index is {file_count - 1}."
                    )
                validated_indices.append(idx)
item["files"] = sorted(list(set(validated_indices)))
# Store only the required fields
validated_abstractions.append(
{
"name": item["name"], # Potentially translated name
"description": item[
"description"
], # Potentially translated description
"files": item["files"],
}
)
print(f"Identified {len(validated_abstractions)} abstractions.")
return validated_abstractions
def post(self, shared, prep_res, exec_res):
shared["abstractions"] = (
exec_res # List of {"name": str, "description": str, "files": [int]}
)
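# IdentifyAbstractions output sketch (illustrative values) as stored above:
#   shared["abstractions"] = [
#       {"name": "Query Processing", "description": "Routes requests...", "files": [0, 3]},
#       {"name": "Query Optimization", "description": "Plans lookups...", "files": [5]},
#   ]
# "files" is a sorted, deduplicated list of indices into shared["files"].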
class AnalyzeRelationships(Node):
def prep(self, shared):
        # 'files' holds file indices; name/description may already be translated
        abstractions = shared["abstractions"]
files_data = shared["files"]
project_name = shared["project_name"] # Get project name
language = shared.get("language", "english") # Get language
use_cache = shared.get("use_cache", True) # Get use_cache flag, default to True
# Get the actual number of abstractions directly
num_abstractions = len(abstractions)
# Create context with abstraction names, indices, descriptions, and relevant file snippets
context = "Identified Abstractions:\\n"
all_relevant_indices = set()
abstraction_info_for_prompt = []
for i, abstr in enumerate(abstractions):
# Use 'files' which contains indices directly
file_indices_str = ", ".join(map(str, abstr["files"]))
# Abstraction name and description might be translated already
info_line = f"- Index {i}: {abstr['name']} (Relevant file indices: [{file_indices_str}])\\n Description: {abstr['description']}"
context += info_line + "\\n"
abstraction_info_for_prompt.append(
f"{i} # {abstr['name']}"
) # Use potentially translated name here too
all_relevant_indices.update(abstr["files"])
context += "\\nRelevant File Snippets (Referenced by Index and Path):\\n"
# Get content for relevant files using helper
relevant_files_content_map = get_content_for_indices(
files_data, sorted(list(all_relevant_indices))
)
# Format file content for context
file_context_str = "\\n\\n".join(
f"--- File: {idx_path} ---\\n{content}"
for idx_path, content in relevant_files_content_map.items()
)
context += file_context_str
return (
context,
"\n".join(abstraction_info_for_prompt),
num_abstractions, # Pass the actual count
project_name,
language,
use_cache,
) # Return use_cache
def exec(self, prep_res):
(
context,
abstraction_listing,
num_abstractions, # Receive the actual count
project_name,
language,
use_cache,
) = prep_res # Unpack use_cache
print(f"Analyzing relationships using LLM...")
# Add language instruction and hints only if not English
language_instruction = ""
lang_hint = ""
list_lang_note = ""
if language.lower() != "english":
language_instruction = f"IMPORTANT: Generate the `summary` and relationship `label` fields in **{language.capitalize()}** language. Do NOT use English for these fields.\n\n"
lang_hint = f" (in {language.capitalize()})"
list_lang_note = f" (Names might be in {language.capitalize()})" # Note for the input list
prompt = f"""
Based on the following abstractions and relevant code snippets from the project `{project_name}`:
List of Abstraction Indices and Names{list_lang_note}:
{abstraction_listing}
Context (Abstractions, Descriptions, Code):
{context}
{language_instruction}Please provide:
1. A high-level `summary` of the project's main purpose and functionality in a few beginner-friendly sentences{lang_hint}. Use markdown formatting with **bold** and *italic* text to highlight important concepts.
2. A list (`relationships`) describing the key interactions between these abstractions. For each relationship, specify:
- `from_abstraction`: Index of the source abstraction (e.g., `0 # AbstractionName1`)
- `to_abstraction`: Index of the target abstraction (e.g., `1 # AbstractionName2`)
- `label`: A brief label for the interaction **in just a few words**{lang_hint} (e.g., "Manages", "Inherits", "Uses").
Ideally the relationship should be backed by one abstraction calling or passing parameters to another.
Simplify the relationships and exclude unimportant ones.
IMPORTANT: Make sure EVERY abstraction is involved in at least ONE relationship (either as source or target). Each abstraction index must appear at least once across all relationships.
Format the output as YAML:
```yaml
summary: |
A brief, simple explanation of the project{lang_hint}.
Can span multiple lines with **bold** and *italic* for emphasis.
relationships:
- from_abstraction: 0 # AbstractionName1
to_abstraction: 1 # AbstractionName2
label: "Manages"{lang_hint}
- from_abstraction: 2 # AbstractionName3
to_abstraction: 0 # AbstractionName1
label: "Provides config"{lang_hint}
# ... other relationships
```
Now, provide the YAML output:
"""
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
# --- Validation ---
yaml_str = response.strip().split("```yaml")[1].split("```")[0].strip()
relationships_data = yaml.safe_load(yaml_str)
if not isinstance(relationships_data, dict) or not all(
k in relationships_data for k in ["summary", "relationships"]
):
raise ValueError(
"LLM output is not a dict or missing keys ('summary', 'relationships')"
)
if not isinstance(relationships_data["summary"], str):
raise ValueError("summary is not a string")
if not isinstance(relationships_data["relationships"], list):
raise ValueError("relationships is not a list")
# Validate relationships structure
validated_relationships = []
for rel in relationships_data["relationships"]:
# Check for 'label' key
if not isinstance(rel, dict) or not all(
k in rel for k in ["from_abstraction", "to_abstraction", "label"]
):
raise ValueError(
f"Missing keys (expected from_abstraction, to_abstraction, label) in relationship item: {rel}"
)
# Validate 'label' is a string
if not isinstance(rel["label"], str):
raise ValueError(f"Relationship label is not a string: {rel}")
# Validate indices
            # Parse indices first; range-check outside the try so its error is not masked
            try:
                from_idx = int(str(rel["from_abstraction"]).split("#")[0].strip())
                to_idx = int(str(rel["to_abstraction"]).split("#")[0].strip())
            except (ValueError, TypeError):
                raise ValueError(f"Could not parse indices from relationship: {rel}")
            if not (
                0 <= from_idx < num_abstractions and 0 <= to_idx < num_abstractions
            ):
                raise ValueError(
                    f"Invalid index in relationship: from={from_idx}, to={to_idx}. Max index is {num_abstractions-1}."
                )
            validated_relationships.append(
                {
                    "from": from_idx,
                    "to": to_idx,
                    "label": rel["label"],  # Potentially translated label
                }
            )
print("Generated project summary and relationship details.")
return {
"summary": relationships_data["summary"], # Potentially translated summary
"details": validated_relationships, # Store validated, index-based relationships with potentially translated labels
}
def post(self, shared, prep_res, exec_res):
# Structure is now {"summary": str, "details": [{"from": int, "to": int, "label": str}]}
# Summary and label might be translated
shared["relationships"] = exec_res
class OrderChapters(Node):
def prep(self, shared):
abstractions = shared["abstractions"] # Name/description might be translated
relationships = shared["relationships"] # Summary/label might be translated
project_name = shared["project_name"] # Get project name
language = shared.get("language", "english") # Get language
use_cache = shared.get("use_cache", True) # Get use_cache flag, default to True
# Prepare context for the LLM
abstraction_info_for_prompt = []
for i, a in enumerate(abstractions):
abstraction_info_for_prompt.append(
f"- {i} # {a['name']}"
) # Use potentially translated name
abstraction_listing = "\n".join(abstraction_info_for_prompt)
# Use potentially translated summary and labels
summary_note = ""
if language.lower() != "english":
summary_note = (
f" (Note: Project Summary might be in {language.capitalize()})"
)
context = f"Project Summary{summary_note}:\n{relationships['summary']}\n\n"
context += "Relationships (Indices refer to abstractions above):\n"
for rel in relationships["details"]:
from_name = abstractions[rel["from"]]["name"]
to_name = abstractions[rel["to"]]["name"]
# Use potentially translated 'label'
context += f"- From {rel['from']} ({from_name}) to {rel['to']} ({to_name}): {rel['label']}\n" # Label might be translated
list_lang_note = ""
if language.lower() != "english":
list_lang_note = f" (Names might be in {language.capitalize()})"
return (
abstraction_listing,
context,
len(abstractions),
project_name,
list_lang_note,
use_cache,
) # Return use_cache
def exec(self, prep_res):
(
abstraction_listing,
context,
num_abstractions,
project_name,
list_lang_note,
use_cache,
) = prep_res # Unpack use_cache
print("Determining chapter order using LLM...")
# No language variation needed here in prompt instructions, just ordering based on structure
# The input names might be translated, hence the note.
prompt = f"""
Given the following project abstractions and their relationships for the project ```` {project_name} ````:
Abstractions (Index # Name){list_lang_note}:
{abstraction_listing}
Context about relationships and project summary:
{context}
If you are going to make a tutorial for ```` {project_name} ````, what is the best order to explain these abstractions, from first to last?
Ideally, first explain those that are the most important or foundational, perhaps user-facing concepts or entry points. Then move to more detailed, lower-level implementation details or supporting concepts.
Output the ordered list of abstraction indices, including the name in a comment for clarity. Use the format `idx # AbstractionName`.
```yaml
- 2 # FoundationalConcept
- 0 # CoreClassA
- 1 # CoreClassB (uses CoreClassA)
- ...
```
Now, provide the YAML output:
"""
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
# --- Validation ---
yaml_str = response.strip().split("```yaml")[1].split("```")[0].strip()
ordered_indices_raw = yaml.safe_load(yaml_str)
if not isinstance(ordered_indices_raw, list):
raise ValueError("LLM output is not a list")
ordered_indices = []
seen_indices = set()
        for entry in ordered_indices_raw:
            try:
                if isinstance(entry, int):
                    idx = entry
                elif isinstance(entry, str) and "#" in entry:
                    idx = int(entry.split("#")[0].strip())
                else:
                    idx = int(str(entry).strip())
            except (ValueError, TypeError):
                raise ValueError(
                    f"Could not parse index from ordered list entry: {entry}"
                )
            # Validate outside the try so range/duplicate errors are not masked as parse failures
            if not (0 <= idx < num_abstractions):
                raise ValueError(
                    f"Invalid index {idx} in ordered list. Max index is {num_abstractions-1}."
                )
            if idx in seen_indices:
                raise ValueError(f"Duplicate index {idx} found in ordered list.")
            ordered_indices.append(idx)
            seen_indices.add(idx)
# Check if all abstractions are included
if len(ordered_indices) != num_abstractions:
raise ValueError(
f"Ordered list length ({len(ordered_indices)}) does not match number of abstractions ({num_abstractions}). Missing indices: {set(range(num_abstractions)) - seen_indices}"
)
print(f"Determined chapter order (indices): {ordered_indices}")
return ordered_indices # Return the list of indices
def post(self, shared, prep_res, exec_res):
# exec_res is already the list of ordered indices
shared["chapter_order"] = exec_res # List of indices
class AnalyzeAPICalls(Node):
def prep(self, shared):
files_data = shared["files"] # List of (path, content) tuples
project_name = shared["project_name"]
language = shared.get("language", "english")
use_cache = shared.get("use_cache", True)
# Filter for frontend files (JavaScript, TypeScript)
frontend_files = []
for path, content in files_data:
if path.endswith((".js", ".jsx", ".ts", ".tsx")):
frontend_files.append({"path": path, "content": content})
if not frontend_files:
print("No frontend files (JS/TS) found to analyze for API calls.")
return None # Skip exec if no relevant files
return {
"frontend_files": frontend_files,
"project_name": project_name,
"language": language,
"use_cache": use_cache,
}
def exec(self, prep_res):
if prep_res is None:
return [] # Return empty list if prep returned None
frontend_files = prep_res["frontend_files"]
project_name = prep_res["project_name"]
language = prep_res["language"]
use_cache = prep_res["use_cache"]
all_api_calls_info = []
print(f"Analyzing API calls in {len(frontend_files)} frontend files using LLM...")
for file_info in frontend_files:
file_path = file_info["path"]
file_content = file_info["content"]
# Add language instruction and hints only if not English
# While the primary analysis is on code, the output YAML structure might be described or confirmed in the target language.
language_instruction = ""
yaml_lang_hint = ""
if language.lower() != "english":
language_instruction = f"IMPORTANT: The response should be YAML. If you add any descriptive text outside the YAML, it should be in **{language.capitalize()}** language.\n\n"
yaml_lang_hint = f" (values for description/notes, if any, should be in {language.capitalize()})"
prompt = f"""
{language_instruction}For the project `{project_name}`, and the file `{file_path}`:
File Content:
```{'javascript' if file_path.endswith(('.js', '.jsx')) else 'typescript'}
{file_content}
```
Analyze the frontend code (JavaScript/TypeScript) above.
Identify all API calls (e.g., using `fetch`, `axios`, `XMLHttpRequest`, or other HTTP client libraries).
For each API call found, provide the following details:
1. `calling_function_name`: The name of the function in which the API call is made. If it's not in a function, use "global scope" or a relevant class/method name.
2. `api_endpoint`: The URL or endpoint of the API being called. If it's a variable, provide the variable name.
3. `http_method`: The HTTP method used (e.g., GET, POST, PUT, DELETE).
4. `request_parameters`: A list of key-value pairs or a description of parameters sent with the request (query parameters, request body, headers if significant). {yaml_lang_hint}
5. `response_usage`: A description of how the API response data is used in the code (e.g., "response.data is stored in `userData` state", "items from response are mapped to UI components"). {yaml_lang_hint}
Format the output as a YAML list of dictionaries, with one dictionary per API call found in this file.
If no API calls are found in this file, output an empty YAML list `[]`.
Example for a single API call:
```yaml
- calling_function_name: "fetchUserDetails"
api_endpoint: "/api/users/{{userId}}" # or variable name like "API_BASE_URL + '/users/' + userId"
http_method: "GET"
request_parameters:
- name: "userId"
source: "path_variable" # e.g., path_variable, query_param, request_body, header
description: "User's unique identifier" {yaml_lang_hint}
response_usage: "The user's name (response.name) and email (response.email) are displayed in the profile section." {yaml_lang_hint}
```
Now, provide the YAML output for the file `{file_path}`:
"""
try:
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0))
yaml_str = response.strip()
if "```yaml" in yaml_str:
yaml_str = yaml_str.split("```yaml")[1].split("```")[0].strip()
elif "```" in yaml_str: # Handle cases where only ``` is present
yaml_str = yaml_str.split("```")[1].strip()
# Ensure it's valid YAML, even if empty
if not yaml_str:
api_calls_in_file = []
else:
api_calls_in_file = yaml.safe_load(yaml_str)
if not isinstance(api_calls_in_file, list):
print(f"Warning: LLM output for {file_path} was not a list, but: {type(api_calls_in_file)}. Treating as no API calls found.")
api_calls_in_file = []
if api_calls_in_file: # Only add if there's content
all_api_calls_info.append({
"file_path": file_path,
"api_calls": api_calls_in_file
})
print(f" - Found {len(api_calls_in_file)} API call(s) in {file_path}")
except yaml.YAMLError as e:
print(f"Error parsing YAML from LLM response for {file_path}: {e}")
print(f"LLM Response was:\n{response}")
except Exception as e:
print(f"Error processing file {file_path} for API calls: {e}")
print(f"LLM Response was (if available):\n{response if 'response' in locals() else 'N/A'}")
if not all_api_calls_info:
print("No API calls identified in any frontend files.")
else:
print(f"Identified API calls in {len(all_api_calls_info)} file(s).")
return all_api_calls_info
def post(self, shared, prep_res, exec_res):
shared["api_call_analysis"] = exec_res
class AnalyzeFastAPIEndpoints(Node):
def prep(self, shared):
files_data = shared["files"] # List of (path, content) tuples
project_name = shared["project_name"]
language = shared.get("language", "english") # For potential descriptions in YAML
use_cache = shared.get("use_cache", True)
# Filter for Python files
python_files = []
for path, content in files_data:
            if path.endswith(".py"):
python_files.append({"path": path, "content": content})
if not python_files:
print("No Python files found to analyze for FastAPI endpoints.")
return None # Skip exec if no relevant files
return {
"python_files": python_files,
"project_name": project_name,
"language": language,
"use_cache": use_cache,
}
def exec(self, prep_res):
if prep_res is None:
return [] # Return empty list if prep returned None
python_files = prep_res["python_files"]
project_name = prep_res["project_name"]
language = prep_res["language"] # For descriptions in YAML
use_cache = prep_res["use_cache"]
all_endpoints_info = []
print(f"Analyzing FastAPI endpoints in {len(python_files)} Python files using LLM...")
for file_info in python_files:
file_path = file_info["path"]
file_content = file_info["content"]
language_instruction = ""
yaml_desc_hint = ""
if language.lower() != "english":
language_instruction = f"IMPORTANT: The response MUST be YAML. If you include any descriptive text for fields like 'description', it should be in **{language.capitalize()}** language.\n\n"
yaml_desc_hint = f" (in {language.capitalize()})"
prompt = f"""
{language_instruction}For the project `{project_name}`, and the Python file `{file_path}`:
File Content:
```python
{file_content}
```
Analyze the Python code above to identify FastAPI endpoints.
For each FastAPI endpoint (e.g., defined with `@app.get`, `@router.post`, etc.), extract the following information:
1. `http_method`: The HTTP method (e.g., GET, POST, PUT, DELETE).
2. `route_path`: The URL path for the endpoint (e.g., "/items/{{item_id}}").
3. `summary`: A brief summary or description of the endpoint, often found in the function's docstring or comments above it{yaml_desc_hint}.
4. `path_parameters`: A list of path parameters. For each, include:
* `name`: Parameter name (e.g., "item_id").
* `type`: Parameter type (e.g., "int", "str"){yaml_desc_hint}.
5. `query_parameters`: A list of query parameters. For each, include:
* `name`: Parameter name (e.g., "limit").
* `type`: Parameter type (e.g., "int", "str"){yaml_desc_hint}.
* `default` (optional): Default value if specified.
* `required` (optional): Boolean, true if the parameter is required, false or absent otherwise.
6. `request_body_model`: Information about the request body, if any. Include:
* `model_name`: The Pydantic model name (e.g., "ItemCreate").
* `fields`: A list of fields in the model, each with `name`, `type`{yaml_desc_hint}, and `required` (boolean).
* `example` (optional): A simple JSON example of the request body{yaml_desc_hint}.
7. `response_model`: Information about the response. Include:
* `model_name`: The Pydantic model name (e.g., "ItemRead").
* `fields`: A list of fields in the model, each with `name` and `type`{yaml_desc_hint}.
* `example` (optional): A simple JSON example of the response body{yaml_desc_hint}.
* `status_code` (optional): The primary HTTP status code for successful responses (e.g., 200, 201).
Format the output as a YAML list of dictionaries, with one dictionary per endpoint found in this file.
If no FastAPI endpoints are found in this file, output an empty YAML list `[]`.
Example for a single endpoint:
```yaml
- http_method: "POST"
route_path: "/items/"
summary: "Create a new item."{yaml_desc_hint}
path_parameters: []
query_parameters: []
request_body_model:
model_name: "ItemCreate"
fields:
- name: "name"
type: "str"
required: true
- name: "price"
type: "float"
required: true
- name: "description"
type: "Optional[str]"
required: false
example:
name: "My Item"
price: 10.5
description: "A cool item."
response_model:
model_name: "ItemRead"
fields:
- name: "id"
type: "int"
- name: "name"
type: "str"
- name: "price"
type: "float"
- name: "description"
type: "Optional[str]"
status_code: 201
example:
id: 1
name: "My Item"
price: 10.5
description: "A cool item."
```
Now, provide the YAML output for the file `{file_path}`:
"""
try:
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0))
yaml_str = response.strip()
if "```yaml" in yaml_str:
yaml_str = yaml_str.split("```yaml")[1].split("```")[0].strip()
elif "```" in yaml_str:
yaml_str = yaml_str.split("```")[1].strip()
if not yaml_str:
endpoints_in_file = []
else:
endpoints_in_file = yaml.safe_load(yaml_str)
if not isinstance(endpoints_in_file, list):
print(f"Warning: LLM output for {file_path} (FastAPI) was not a list, but: {type(endpoints_in_file)}. Treating as no endpoints found.")
endpoints_in_file = []
if endpoints_in_file:
all_endpoints_info.append({
"file_path": file_path,
"endpoints": endpoints_in_file
})
print(f" - Found {len(endpoints_in_file)} FastAPI endpoint(s) in {file_path}")
except yaml.YAMLError as e:
print(f"Error parsing YAML from LLM response for {file_path} (FastAPI): {e}")
print(f"LLM Response was:\n{response}")
except Exception as e:
print(f"Error processing file {file_path} for FastAPI endpoints: {e}")
print(f"LLM Response was (if available):\n{response if 'response' in locals() else 'N/A'}")
if not all_endpoints_info:
print("No FastAPI endpoints identified in any Python files.")
else:
print(f"Identified FastAPI endpoints in {len(all_endpoints_info)} file(s).")
return all_endpoints_info
def post(self, shared, prep_res, exec_res):
shared["fastapi_endpoint_analysis"] = exec_res
class GenerateAPIDocumentation(Node):
def prep(self, shared):
fastapi_analysis_data = shared.get("fastapi_endpoint_analysis", [])
project_name = shared["project_name"]
language = shared.get("language", "english")
use_cache = shared.get("use_cache", True)
if not fastapi_analysis_data:
print("No FastAPI endpoint analysis data found to generate API documentation.")
return None
return {
"fastapi_analysis_data": fastapi_analysis_data,
"project_name": project_name,
"language": language,
"use_cache": use_cache,
}
def exec(self, prep_res):
if prep_res is None:
return "" # Return empty string if no data
fastapi_analysis_data = prep_res["fastapi_analysis_data"]
project_name = prep_res["project_name"]
language = prep_res["language"]
use_cache = prep_res["use_cache"]
print(f"Generating API documentation for {project_name} using LLM...")
# Construct a string representation of the endpoint data for the prompt
endpoint_data_str_parts = []
for file_analysis in fastapi_analysis_data:
file_path = file_analysis.get("file_path", "Unknown file")
endpoint_data_str_parts.append(f"Endpoints from file: {file_path}\n")
if isinstance(file_analysis.get("endpoints"), list):
for endpoint in file_analysis["endpoints"]:
endpoint_data_str_parts.append(f"- Method: {endpoint.get('http_method')}, Path: {endpoint.get('route_path')}")
endpoint_data_str_parts.append(f" Summary: {endpoint.get('summary', 'N/A')}")
# Add more details as needed for the prompt, e.g., parameters, request/response bodies
# This part can be expanded to make the YAML string more complete for the LLM context
if endpoint.get("path_parameters"):
endpoint_data_str_parts.append(f" Path Params: {endpoint.get('path_parameters')}")
if endpoint.get("query_parameters"):
endpoint_data_str_parts.append(f" Query Params: {endpoint.get('query_parameters')}")
if endpoint.get("request_body_model"):
endpoint_data_str_parts.append(f" Request Body: {endpoint.get('request_body_model')}")
if endpoint.get("response_model"):
endpoint_data_str_parts.append(f" Response Model: {endpoint.get('response_model')}")
endpoint_data_str_parts.append("\n")
full_endpoint_data_for_prompt = "\n".join(endpoint_data_str_parts)
language_instruction = ""
doc_lang_note = ""
if language.lower() != "english":
lang_cap = language.capitalize()
language_instruction = f"IMPORTANT: Generate the ENTIRE API documentation in **{lang_cap}**. Input data (summaries, types) might already be in {lang_cap}, but all surrounding text, explanations, and section titles MUST be in {lang_cap}. DO NOT use English except for technical keywords like HTTP methods, or Pydantic model names if they are intrinsically English.\n\n"
doc_lang_note = f" (Translate all descriptive text to {lang_cap})"
prompt = f"""
{language_instruction}Project Name: {project_name}
FastAPI Endpoint Data (extracted from source code):
```yaml
{full_endpoint_data_for_prompt}
```
Based on the structured FastAPI endpoint data provided above, generate a comprehensive API documentation in Markdown format, specifically for frontend developers{doc_lang_note}.
The documentation should include:
1. A main title for the API documentation (e.g., "API Reference for {project_name}"){doc_lang_note}.
2. An introductory section briefly explaining what the API does or how to use the documentation{doc_lang_note}.
3. For each endpoint, create a section with:
* A clear title including the HTTP method and route path (e.g., `POST /items/`){doc_lang_note}.
* The summary/description of the endpoint{doc_lang_note}.
* Path Parameters: If any, list them in a table with columns for `Name`, `Type`, and `Description`{doc_lang_note}.
* Query Parameters: If any, list them in a table with columns for `Name`, `Type`, `Required`, `Default`, and `Description`{doc_lang_note}.
* Request Body: If applicable, describe the expected request body. Include the Pydantic model name, its fields (with `Name`, `Type`, `Required`), and a JSON example{doc_lang_note}.
* Response Model: Describe the expected response. Include the Pydantic model name, its fields (with `Name`, `Type`), the success status code, and a JSON example of the response{doc_lang_note}.
Use clear Markdown formatting, including headings, tables, and code blocks for JSON examples.
Ensure the language used is beginner-friendly for frontend developers and all descriptive text is in the target language specified ({language.capitalize() if language.lower() != 'english' else 'English'}).
Output *only* the Markdown content for this API documentation.
Do NOT include ```markdown``` tags around the output.
Begin the documentation now:
"""
try:
api_doc_markdown = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0))
print(f"Successfully generated API documentation for {project_name}.")
return api_doc_markdown.strip()
except Exception as e:
print(f"Error generating API documentation for {project_name}: {e}")
return "" # Return empty string on error
def post(self, shared, prep_res, exec_res):
shared["api_documentation_md"] = exec_res
class WriteChapters(BatchNode):
def prep(self, shared):
chapter_order = shared["chapter_order"] # List of indices
abstractions = shared[
"abstractions"
] # List of {"name": str, "description": str, "files": [int]}
files_data = shared["files"] # List of (path, content) tuples
project_name = shared["project_name"]
language = shared.get("language", "english")
use_cache = shared.get("use_cache", True) # Get use_cache flag, default to True
api_call_analysis = shared.get("api_call_analysis", []) # Get API call analysis
# Get already written chapters to provide context
# We store them temporarily during the batch run, not in shared memory yet
# The 'previous_chapters_summary' will be built progressively in the exec context
        # Instance variable for temporary storage across exec calls
        self.chapters_written_so_far = []
# Create a complete list of all chapters
all_chapters = []
chapter_filenames = {} # Store chapter filename mapping for linking
for i, abstraction_index in enumerate(chapter_order):
if 0 <= abstraction_index < len(abstractions):
chapter_num = i + 1
chapter_name = abstractions[abstraction_index][
"name"
] # Potentially translated name
# Create safe filename (from potentially translated name)
safe_name = "".join(
c if c.isalnum() else "_" for c in chapter_name
).lower()
filename = f"{i+1:02d}_{safe_name}.md"
# Format with link (using potentially translated name)
all_chapters.append(f"{chapter_num}. [{chapter_name}]({filename})")
# Store mapping of chapter index to filename for linking
chapter_filenames[abstraction_index] = {
"num": chapter_num,
"name": chapter_name,
"filename": filename,
}
# Create a formatted string with all chapters
full_chapter_listing = "\n".join(all_chapters)
items_to_process = []
for i, abstraction_index in enumerate(chapter_order):
if 0 <= abstraction_index < len(abstractions):
abstraction_details = abstractions[
abstraction_index
] # Contains potentially translated name/desc
# Use 'files' (list of indices) directly
related_file_indices = abstraction_details.get("files", [])
# Get content using helper, passing indices
related_files_content_map = get_content_for_indices(
files_data, related_file_indices
)
# Find relevant API calls for this abstraction's files
relevant_api_calls_for_abstraction = []
abstraction_file_paths = [files_data[idx][0] for idx in related_file_indices]
for analysis_item in api_call_analysis:
if analysis_item["file_path"] in abstraction_file_paths:
relevant_api_calls_for_abstraction.append(analysis_item)
# Get previous chapter info for transitions (uses potentially translated name)
prev_chapter = None