-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathjsintellirecon.py
More file actions
executable file
·133 lines (114 loc) · 5.04 KB
/
jsintellirecon.py
File metadata and controls
executable file
·133 lines (114 loc) · 5.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import argparse
import requests
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
import re
import json
from termcolor import colored
class JSIntelliRecon:
    """Scan a target site's JavaScript — external files and inline
    <script> bodies — for API endpoints, hard-coded secrets, library
    versions, internal paths, and IPv4 addresses, printing findings to
    the console and saving them as JSON."""

    def __init__(self, url, output, deep=False):
        """
        url:    target base URL (any trailing slash is stripped).
        output: path of the JSON report to write.
        deep:   if True, also crawl same-site subpages linked from the
                start page and harvest their scripts.
        """
        self.url = url.rstrip('/')
        self.output = output
        self.deep = deep
        self.js_files = set()   # absolute URLs of external scripts to fetch
        self.results = []       # one findings dict per analyzed script
        self.sensitive_keywords = ['auth', 'admin', 'debug', 'config', 'reset', 'token', 'login']

    def _fetch(self, target_url, label):
        # Shared GET helper for pages and JS files (fetch_html/fetch_js
        # were duplicates).  Returns "" on any error or non-200 status so
        # callers can treat a failed target as simply empty.
        try:
            response = requests.get(target_url, timeout=10)
            if response.status_code == 200:
                return response.text
        except Exception as e:
            print(colored(f"[!] Error fetching {label}{target_url}: {e}", "red"))
        return ""

    def fetch_html(self, target_url):
        """Return the HTML body at target_url, or "" on failure."""
        return self._fetch(target_url, "")

    def fetch_js(self, js_url):
        """Return the JS file body at js_url, or "" on failure."""
        return self._fetch(js_url, "JS file ")

    def extract_js_links(self, html, page_url=None):
        """Collect external <script src> URLs into self.js_files and
        analyze inline scripts immediately.

        page_url: URL the HTML was fetched from, used to resolve
        relative script paths.  Defaults to the scan base URL for
        backward compatibility with existing callers.
        """
        base = page_url or self.url
        soup = BeautifulSoup(html, 'html.parser')
        for script in soup.find_all('script'):
            src = script.get('src')
            if src:
                # BUG FIX: resolve relative src against the page it came
                # from, not unconditionally against the site root —
                # subpage-relative paths previously resolved wrongly.
                self.js_files.add(urljoin(base, src))
            elif script.string:
                self.analyze_js(script.string, base + ' (inline)')

    def tag_sensitive(self, item):
        """Return " [TAG: kw1, kw2]" listing every sensitive keyword
        found (case-insensitively) in item, or "" if none match."""
        tags = [kw for kw in self.sensitive_keywords if kw in item.lower()]
        return f" [TAG: {', '.join(tags)}]" if tags else ""

    def analyze_js(self, js_code, js_url):
        """Regex-scan one script body, print findings, and append a
        findings dict (possibly with all-empty lists) to self.results."""
        endpoints = list(set(re.findall(r'[\"\']((?:https?:)?\/\/[^\"\']+)[\"\']', js_code)))
        # Deduplicate secrets/ips too, consistent with the other lists.
        secrets = list(set(re.findall(r'(?:api[_-]?key|token|secret|password)[\"\']?\s*[:=]\s*[\"\']([^\"\']+)[\"\']', js_code, re.IGNORECASE)))
        versions = list(set(re.findall(r'(jquery|react|angular)[^0-9]*([0-9]+\.[0-9]+(?:\.[0-9]+)?)', js_code, re.IGNORECASE)))
        internal_paths = list(set(re.findall(r'/\w+/\w+\.(?:php|aspx|jsp|json|html)', js_code)))
        ips = list(set(re.findall(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', js_code)))
        if endpoints or secrets or versions or internal_paths or ips:
            print(colored(f"\n[+] Analyzing: {js_url}", "cyan"))
            if endpoints:
                print(colored("[!] API Endpoints:", "yellow"))
                for endpoint in endpoints:
                    tag = self.tag_sensitive(endpoint)
                    print(f"    - {endpoint}{tag}")
            if secrets:
                print(colored("[!] Possible Secrets:", "red"))
                for secret in secrets:
                    print(f"    - {secret}")
            if versions:
                print(colored("[!] Detected Library Versions:", "magenta"))
                for lib, ver in versions:
                    print(f"    - {lib} {ver}")
            if internal_paths:
                print(colored("[!] Internal Paths:", "green"))
                for path in internal_paths:
                    tag = self.tag_sensitive(path)
                    print(f"    - {path}{tag}")
            if ips:
                print(colored("[!] Internal IPs:", "red"))
                for ip in ips:
                    print(f"    - {ip}")
        findings = {
            'url': js_url,
            'endpoints': endpoints,
            'secrets': secrets,
            'versions': versions,
            'internal_paths': internal_paths,
            'ips': ips
        }
        self.results.append(findings)

    def run(self):
        """Execute the scan: crawl the target (and subpages if deep),
        fetch every collected script, analyze it, and write the JSON
        report to self.output."""
        print(colored(f"[*] Scanning {self.url}...", "cyan"))
        html = self.fetch_html(self.url)
        self.extract_js_links(html, self.url)
        if self.deep:
            soup = BeautifulSoup(html, 'html.parser')
            crawled = set()  # BUG FIX: don't fetch a subpage twice when linked repeatedly
            for link in soup.find_all('a', href=True):
                href = link['href']
                # Same-site, root-relative links only.
                if urlparse(href).netloc == '' and href.startswith('/'):
                    subpage_url = urljoin(self.url, href)
                    if subpage_url in crawled:
                        continue
                    crawled.add(subpage_url)
                    print(colored(f"[*] Crawling subpage: {subpage_url}", "cyan"))
                    sub_html = self.fetch_html(subpage_url)
                    self.extract_js_links(sub_html, subpage_url)
        for js_file in self.js_files:
            js_code = self.fetch_js(js_file)
            if js_code:
                self.analyze_js(js_code, js_file)
        with open(self.output, 'w') as f:
            json.dump(self.results, f, indent=4)
        print(colored(f"\n[+] Done! Results saved to {self.output}", "green"))
def main():
    """Parse command-line arguments and launch a JSIntelliRecon scan."""
    cli = argparse.ArgumentParser(
        description='JSIntelliRecon - JavaScript Reconnaissance Tool')
    cli.add_argument('--url', required=True, help='Target URL')
    cli.add_argument('--output', required=True, help='Output file (JSON)')
    cli.add_argument('--deep', action='store_true',
                     help='Enable deep crawling for subpages')
    opts = cli.parse_args()
    # Build the scanner from the parsed options and run it immediately.
    JSIntelliRecon(opts.url, opts.output, opts.deep).run()

if __name__ == '__main__':
    main()