-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathpage_dispatch.py
More file actions
59 lines (45 loc) · 1.83 KB
/
page_dispatch.py
File metadata and controls
59 lines (45 loc) · 1.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from parsel import Selector
from scraperx import Scraper, run_cli, Dispatch, Download, Extract
class MyDispatch(Dispatch):
    """Create one download task for every page of the tablets listing."""

    base_url = 'http://webscraperio.us-east-1.elasticbeanstalk.com/test-sites/e-commerce/static/computers/tablets'  # noqa: E501

    def submit_tasks(self):
        """Return a list of task dicts, one per page from 1 to the last page."""
        last_page = self._get_max_page()
        return [{'url': f'{self.base_url}?page={page_num}',
                 'page': page_num,
                 }
                for page_num in range(1, last_page + 1)]

    def _get_max_page(self):
        """Fetch the first listing page and return its highest page number.

        Reads the pagination widget; assumes the final <li> is a "next"
        control, so the one before it carries the last page number —
        TODO confirm against the live markup.
        """
        seed_task = {'url': self.base_url}
        html = DispatchDownloadHelper(self.scraper, seed_task).download()
        page_labels = (Selector(text=html)
                       .css('h1 ~ ul.pagination li')
                       .xpath('string()')
                       .extract())
        return int(page_labels[-2])
class DispatchDownloadHelper(Download):
    """Minimal Download subclass the dispatcher uses to fetch a single page."""

    def download(self):
        """GET the task's URL and return the response body as text."""
        response = self.request_get(self.task['url'])
        return response.text
class MyExtract(Extract):
    """Pull product entries out of each downloaded listing page."""

    def extract(self, raw_source, source_idx):
        """Yield one extraction pass over the product grid, saved as JSON."""
        products_task = self.extract_task(
            name='products',
            selectors=['h1 + div.row > div'],
            idx_offset=1,
            callback=self.extract_products,
            post_extract=self.save_as,
            post_extract_kwargs={'file_format': 'json'},
        )
        yield products_task

    def extract_products(self, element, idx, **kwargs):
        """Map one product card element to a flat result row."""
        title = element.css('div.caption a').xpath('string()').extract_first()
        return {'title': title,
                'rank': idx,
                'page': self.task['page'],
                }
# Assemble the scraper from the dispatch and extract stages defined above.
my_scraper = Scraper(
    dispatch_cls=MyDispatch,
    extract_cls=MyExtract,
)
if __name__ == '__main__':
    import logging

    # %(scraper_name)s is a custom record field — presumably injected by
    # scraperx's logging setup; confirm before reusing this format elsewhere.
    LOG_FORMAT = '%(asctime)s - %(levelname)s - %(name)s - [%(scraper_name)s] %(message)s'
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    run_cli(my_scraper)