l7.py
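"""Crawl pages within a fixed link distance of a start URL and apply an
action to the text of each page reached. The example action at the bottom
collects sentences mentioning 'Python'."""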
from bs4 import BeautifulSoup as bs
import re
import urllib.request
import urllib.error
import concurrent.futures
import multiprocessing as mp

# Django is used here only for its standalone URL validator.
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError


def make_soup(url):
    """Fetch `url` and return parsed HTML, or None if it is invalid or unreachable."""
    validator = URLValidator()
    try:
        validator(url)
    except ValidationError:
        return None
    # Some sites refuse the default urllib user agent, so send a browser-like one.
    request = urllib.request.Request(
        url, headers={'User-Agent': 'Chrome/83.0.4103.97'})
    try:
        page = urllib.request.urlopen(request).read()
    except (urllib.error.URLError, UnicodeEncodeError):
        return None
    return bs(page, 'html.parser')


def get_links(url):
    """Return every absolute http(s) link found on the page at `url`."""
    soup = make_soup(url)
    if soup is None:
        return []
    return [href for href in (a.get('href') for a in soup.find_all('a'))
            if href and href.startswith('http')]


def get_text(url):
    """Return the visible text of the page at `url` as a single string."""
    soup = make_soup(url)
    if soup is None:
        return ""
    # Remove elements that carry no human-readable page text.
    for noise in soup(["script", "style", "head", "meta"]):
        noise.decompose()
    return " ".join(soup.stripped_strings)


def crawl(start_page, distance, action):
    """Breadth-first crawl up to `distance` link hops from `start_page`,
    then apply `action` to the text of every page reached."""
    pages = {start_page}
    all_pages = {start_page}
    print('Getting links: ', flush=True)
    for _ in range(distance):
        # Fetch the outgoing links of all frontier pages concurrently;
        # network I/O dominates here, so threads are a good fit.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            current_pages = set().union(*executor.map(get_links, pages))
        # Only pages not seen before form the next frontier.
        pages = current_pages - all_pages
        all_pages |= current_pages
    pages = list(all_pages)
    print('Actioning: ', flush=True)
    # Parsing and text processing are CPU-bound, so use a process pool.
    with mp.Pool() as pool:
        parsed_pages = pool.map(get_text, pages)
    sentences = map(action, parsed_pages)
    return zip(pages, sentences)


def sentence_with_python(text):
    """Split `text` into sentences and keep those mentioning 'Python'."""
    sentences = re.split(r'(?<=[.?!])\s+', text)
    return [s.strip() for s in sentences if 'Python' in s]


if __name__ == '__main__':
    # The guard is required: multiprocessing re-imports this module in its
    # worker processes, and the crawl must not restart there.
    results = crawl(
        "https://en.wikipedia.org/wiki/Computer_programming", 1,
        sentence_with_python)
    for url, sentences in results:
        if sentences:
            print(url, sentences)
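
# A minimal sketch of an alternative `action` (hypothetical, not part of the
# original script): `crawl` accepts any callable mapping page text to a
# result, e.g. a word count instead of sentence extraction.
#
#   def count_words(text):
#       return len(text.split())
#
#   results = crawl("https://example.com", 1, count_words)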