import argparse
import json
import re
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup
from termcolor import colored
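# Note: requests, beautifulsoup4 (bs4) and termcolor are third-party packages;
# they must be installed (e.g. via pip) before this script will run.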


class JSIntelliRecon:
    """Collect JavaScript from a target site and scan it for API endpoints,
    possible secrets, library versions, internal paths and IP addresses."""

    def __init__(self, url, output, deep=False):
        self.url = url.rstrip('/')
        self.output = output
        self.deep = deep
        self.js_files = set()
        self.results = []
        self.sensitive_keywords = ['auth', 'admin', 'debug', 'config', 'reset', 'token', 'login']

    def fetch_html(self, target_url):
        # Return the page HTML, or an empty string on any failure.
        try:
            response = requests.get(target_url, timeout=10)
            if response.status_code == 200:
                return response.text
        except Exception as e:
            print(colored(f"[!] Error fetching {target_url}: {e}", "red"))
        return ""

    def extract_js_links(self, html, base_url=None):
        # Resolve script URLs against the page they were found on (defaults to the
        # target root), so relative src values collected from subpages stay correct.
        base_url = base_url or self.url
        soup = BeautifulSoup(html, 'html.parser')
        scripts = soup.find_all('script')
        for script in scripts:
            src = script.get('src')
            if src:
                full_url = urljoin(base_url, src)
                self.js_files.add(full_url)
            elif script.string:
                inline_js = script.string
                self.analyze_js(inline_js, base_url + ' (inline)')

    def fetch_js(self, js_url):
        try:
            response = requests.get(js_url, timeout=10)
            if response.status_code == 200:
                return response.text
        except Exception as e:
            print(colored(f"[!] Error fetching JS file {js_url}: {e}", "red"))
        return ""

    def tag_sensitive(self, item):
        # Flag findings that contain any of the configured sensitive keywords.
        tags = [kw for kw in self.sensitive_keywords if kw in item.lower()]
        return f" [TAG: {', '.join(tags)}]" if tags else ""

    def analyze_js(self, js_code, js_url):
        # Absolute (or protocol-relative) URLs quoted anywhere in the script.
        endpoints = list(set(re.findall(r'["\']((?:https?:)?//[^"\']+)["\']', js_code)))
        # Values assigned to keys such as api_key, token, secret or password.
        secrets = list(set(re.findall(r'(?:api[_-]?key|token|secret|password)["\']?\s*[:=]\s*["\']([^"\']+)["\']', js_code, re.IGNORECASE)))
        # Version strings for common front-end libraries.
        versions = list(set(re.findall(r'(jquery|react|angular)[^0-9]*([0-9]+\.[0-9]+(?:\.[0-9]+)?)', js_code, re.IGNORECASE)))
        # Server-side paths referenced from the JavaScript.
        internal_paths = list(set(re.findall(r'/\w+/\w+\.(?:php|aspx|jsp|json|html)', js_code)))
        # Any dotted-quad IP addresses (not restricted to private ranges).
        ips = list(set(re.findall(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', js_code)))

        if endpoints or secrets or versions or internal_paths or ips:
            print(colored(f"\n[+] Analyzing: {js_url}", "cyan"))
            if endpoints:
                print(colored("[!] API Endpoints:", "yellow"))
                for endpoint in endpoints:
                    tag = self.tag_sensitive(endpoint)
                    print(f"    - {endpoint}{tag}")
            if secrets:
                print(colored("[!] Possible Secrets:", "red"))
                for secret in secrets:
                    print(f"    - {secret}")
            if versions:
                print(colored("[!] Detected Library Versions:", "magenta"))
                for lib, ver in versions:
                    print(f"    - {lib} {ver}")
            if internal_paths:
                print(colored("[!] Internal Paths:", "green"))
                for path in internal_paths:
                    tag = self.tag_sensitive(path)
                    print(f"    - {path}{tag}")
            if ips:
                print(colored("[!] IP Addresses:", "red"))
                for ip in ips:
                    print(f"    - {ip}")

            # Record findings only for files where something was detected.
            findings = {
                'url': js_url,
                'endpoints': endpoints,
                'secrets': secrets,
                'versions': versions,
                'internal_paths': internal_paths,
                'ips': ips
            }
            self.results.append(findings)

    def run(self):
        print(colored(f"[*] Scanning {self.url}...", "cyan"))
        html = self.fetch_html(self.url)
        self.extract_js_links(html, self.url)

        if self.deep:
            # Deep mode: also collect scripts from same-site, root-relative subpages.
            soup = BeautifulSoup(html, 'html.parser')
            for link in soup.find_all('a', href=True):
                href = link['href']
                if urlparse(href).netloc == '' and href.startswith('/'):
                    subpage_url = urljoin(self.url, href)
                    print(colored(f"[*] Crawling subpage: {subpage_url}", "cyan"))
                    sub_html = self.fetch_html(subpage_url)
                    self.extract_js_links(sub_html, subpage_url)

        for js_file in self.js_files:
            js_code = self.fetch_js(js_file)
            if js_code:
                self.analyze_js(js_code, js_file)

        with open(self.output, 'w') as f:
            json.dump(self.results, f, indent=4)

        print(colored(f"\n[+] Done! Results saved to {self.output}", "green"))


def main():
    parser = argparse.ArgumentParser(description='JSIntelliRecon - JavaScript Reconnaissance Tool')
    parser.add_argument('--url', required=True, help='Target URL')
    parser.add_argument('--output', required=True, help='Output file (JSON)')
    parser.add_argument('--deep', action='store_true', help='Enable deep crawling for subpages')
    args = parser.parse_args()

    recon = JSIntelliRecon(args.url, args.output, args.deep)
    recon.run()


if __name__ == '__main__':
    main()
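# Example usage (a minimal sketch; the filename jsintellirecon.py is an
# assumption, not taken from the repository):
#
#   python jsintellirecon.py --url https://example.com --output results.json
#   python jsintellirecon.py --url https://example.com --output results.json --deep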