-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscrap.py
More file actions
46 lines (35 loc) · 1.24 KB
/
scrap.py
File metadata and controls
46 lines (35 loc) · 1.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import os
import sys

import requests
import tldextract
from bs4 import BeautifulSoup
# Fetch a page, rewrite its relative href/src attributes into absolute URLs,
# and save the prettified HTML as <dirc>/<domain>.html.
#
# Usage: python scrap.py <url> <output-directory>
url = sys.argv[1]   # page to fetch; also the base for resolving relative links
dirc = sys.argv[2]  # directory the rewritten HTML is written into

# Normalize the base so joining never drops or doubles the separator
# (fixes e.g. "http://x.com" + "page" -> "http://x.compage").
base = url if url.endswith('/') else url + '/'

# Send a browser-like User-Agent: some sites reject the default
# python-requests one.
headers = requests.utils.default_headers()
headers.update({
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
})
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'html.parser')

# Elements whose href/src starts with "./" (explicitly relative). Quoted
# attribute values are the robust CSS form — no backslash escaping needed.
links = soup.select('[href^="./"], [src^="./"]')
# Elements whose href/src neither starts with "." nor is a "#" fragment.
other_links = soup.select(
    '[href]:not([href^="."]):not([href^="#"]), [src]:not([src^="."]):not([src^="#"])')

for link in links:
    attr = 'href' if link.get('href') else 'src'
    # Strip only the LEADING "./" — a blanket str.replace would also mangle
    # any "./" occurring later in the path — then anchor to the base URL.
    link[attr] = base + link[attr][2:]

for link in other_links:
    # Mirror the original if/elif: handle at most one attribute, href first.
    for attr in ('href', 'src'):
        value = link.get(attr)
        if value:
            # No registered domain suffix => the value is a bare relative
            # path (e.g. "css/style.css"), so anchor it to the base URL.
            if tldextract.extract(value).suffix == '':
                link[attr] = base + value
            break

# Name the output file after the registered domain of the fetched URL.
# os.path.join is portable; a hard-coded '\\' would create a file with a
# literal backslash in its name on POSIX systems.
name = tldextract.extract(url)
out_path = os.path.join(dirc, name.domain + ".html")
# 'with' guarantees the file is closed even if the write raises; utf-8
# avoids platform-default-encoding errors on non-ASCII pages.
with open(out_path, "w", encoding="utf-8") as f:
    f.write(soup.prettify())
print(out_path)