diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..e7e9d11
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,2 @@
+# Default ignored files
+/workspace.xml
diff --git a/.idea/desafio-webcrawler.iml b/.idea/desafio-webcrawler.iml
new file mode 100644
index 0000000..0b0e0bb
--- /dev/null
+++ b/.idea/desafio-webcrawler.iml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..0c095dc
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..8db51b8
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/b2wcrawler/.gitignore b/b2wcrawler/.gitignore
new file mode 100644
index 0000000..772826c
--- /dev/null
+++ b/b2wcrawler/.gitignore
@@ -0,0 +1,21 @@
+/.vagrant
+/scrapy.iml
+*.pyc
+_trial_temp*
+dropin.cache
+docs/build
+*egg-info
+.tox
+venv
+build
+dist
+.idea
+htmlcov/
+.coverage
+.pytest_cache/
+.coverage.*
+.cache/
+.mypy_cache/
+/tests/keys/localhost.crt
+/tests/keys/localhost.key
+Thumbs.db
\ No newline at end of file
diff --git a/b2wcrawler/README.md b/b2wcrawler/README.md
new file mode 100644
index 0000000..012ea76
--- /dev/null
+++ b/b2wcrawler/README.md
@@ -0,0 +1,6 @@
+# Webcrawler
+
+The spider, located in `b2wcrawler/spiders`, crawls every page of the website *http://quotes.toscrape.com/*.
+The records follow the requested model, and the pipeline stores the items in the MongoDB collection `pauloandre_limaflores`.
+
+The queries are kept in the `queries.js` file. All of them were run and verified using the Mongo Shell or Compass.
\ No newline at end of file
diff --git a/b2wcrawler/b2wcrawler/__init__.py b/b2wcrawler/b2wcrawler/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/b2wcrawler/b2wcrawler/items.py b/b2wcrawler/b2wcrawler/items.py
new file mode 100644
index 0000000..a603d94
--- /dev/null
+++ b/b2wcrawler/b2wcrawler/items.py
@@ -0,0 +1,12 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class QuoteItem(scrapy.Item):
+    title = scrapy.Field()
+    author = scrapy.Field()
+    tags = scrapy.Field()
diff --git a/b2wcrawler/b2wcrawler/middlewares.py b/b2wcrawler/b2wcrawler/middlewares.py
new file mode 100644
index 0000000..674f95a
--- /dev/null
+++ b/b2wcrawler/b2wcrawler/middlewares.py
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class B2WcrawlerSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class B2WcrawlerDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either;
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/b2wcrawler/b2wcrawler/pipelines.py b/b2wcrawler/b2wcrawler/pipelines.py
new file mode 100644
index 0000000..ec6f3ca
--- /dev/null
+++ b/b2wcrawler/b2wcrawler/pipelines.py
@@ -0,0 +1,36 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+
+import pymongo
+from itemadapter import ItemAdapter
+
+
+class MongoDBPipeline:
+    collection_name = 'pauloandre_limaflores'
+
+    def __init__(self, mongo_uri, mongo_db):
+        self.mongo_uri = mongo_uri
+        self.mongo_db = mongo_db
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        return cls(
+            mongo_uri=crawler.settings.get('MONGO_URI'),
+            mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
+        )
+
+    def open_spider(self, spider):
+        self.client = pymongo.MongoClient(self.mongo_uri)
+        self.db = self.client[self.mongo_db]
+
+    def close_spider(self, spider):
+        self.client.close()
+
+    def process_item(self, item, spider):
+        self.db[self.collection_name].insert_one(ItemAdapter(item).asdict())
+        return item
diff --git a/b2wcrawler/b2wcrawler/settings.py b/b2wcrawler/b2wcrawler/settings.py
new file mode 100644
index 0000000..f0e7d06
--- /dev/null
+++ b/b2wcrawler/b2wcrawler/settings.py
@@ -0,0 +1,92 @@
+# Scrapy settings for b2wcrawler project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'b2wcrawler'
+
+SPIDER_MODULES = ['b2wcrawler.spiders']
+NEWSPIDER_MODULE = 'b2wcrawler.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'b2wcrawler (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'b2wcrawler.middlewares.B2WcrawlerSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'b2wcrawler.middlewares.B2WcrawlerDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'b2wcrawler.pipelines.MongoDBPipeline': 300,
+}
+
+MONGO_URI = "mongodb://localhost:27017"
+MONGO_DATABASE = "quotestoscrape"
+
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/b2wcrawler/b2wcrawler/spiders/__init__.py b/b2wcrawler/b2wcrawler/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/b2wcrawler/b2wcrawler/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/b2wcrawler/b2wcrawler/spiders/quotes_spider.py b/b2wcrawler/b2wcrawler/spiders/quotes_spider.py
new file mode 100644
index 0000000..de0f043
--- /dev/null
+++ b/b2wcrawler/b2wcrawler/spiders/quotes_spider.py
@@ -0,0 +1,29 @@
+import scrapy
+from ..items import QuoteItem
+
+
+class QuotesSpider(scrapy.Spider):
+    name = "quotes"
+
+    start_urls = [
+        'http://quotes.toscrape.com/page/1/',
+    ]
+
+    def parse(self, response):
+        for quote in response.css('div.quote'):
+            item = QuoteItem()
+
+            item['title'] = quote.css('span.text::text').get()
+            item['author'] = {
+                'name': quote.css('small.author::text').get(),
+                'url': 'http://quotes.toscrape.com' + quote.css('span a::attr(href)').extract()[0],
+            }
+            item['tags'] = quote.css('div.tags a.tag::text').getall()
+
+            yield item
+
+
+        next_page = response.css('li.next a::attr(href)').get()
+        if next_page is not None:
+            next_page = response.urljoin(next_page)
+            yield scrapy.Request(next_page, callback=self.parse)
diff --git a/b2wcrawler/queries.js b/b2wcrawler/queries.js
new file mode 100644
index 0000000..75a7366
--- /dev/null
+++ b/b2wcrawler/queries.js
@@ -0,0 +1,21 @@
+// How many quotes were collected?
+db.pauloandre_limaflores.count()
+
+// How many distinct tags were collected?
+db.pauloandre_limaflores.distinct( "tags" )
+
+// How many quotes were collected per author? (example below)
+db.pauloandre_limaflores.aggregate(
+    [{
+        $match: {
+
+        }
+    }, {
+        $group: {
+            _id: '$author.name',
+            total: {
+                '$sum': 1
+            }
+        }
+    }]
+)
\ No newline at end of file
diff --git a/b2wcrawler/scrapy.cfg b/b2wcrawler/scrapy.cfg
new file mode 100644
index 0000000..850bca0
--- /dev/null
+++ b/b2wcrawler/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = b2wcrawler.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = b2wcrawler
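
For reference, the crawl added by this patch can also be launched programmatically. The sketch below is equivalent to running `scrapy crawl quotes` from the directory containing `scrapy.cfg`; the file name `run_quotes.py` is illustrative and not part of the patch, and it assumes the script is executed from the project root so `get_project_settings()` picks up `b2wcrawler/settings.py`, with MongoDB reachable at the configured `MONGO_URI`.

```python
# run_quotes.py: hypothetical helper script, not part of the patch above.
# Runs the QuotesSpider with the project settings so that MongoDBPipeline
# (enabled via ITEM_PIPELINES) stores every scraped quote in MongoDB.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from b2wcrawler.spiders.quotes_spider import QuotesSpider


def main():
    settings = get_project_settings()  # loads b2wcrawler/settings.py via scrapy.cfg
    process = CrawlerProcess(settings)
    process.crawl(QuotesSpider)
    process.start()  # blocks until the crawl finishes


if __name__ == "__main__":
    main()
```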
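
The questions answered by `queries.js` can also be checked from Python with `pymongo`. This is a minimal sketch under the patch's defaults (the `MONGO_URI` and `MONGO_DATABASE` values from `settings.py` and the collection name used by `MongoDBPipeline`); the file name `check_quotes.py` is hypothetical.

```python
# check_quotes.py: hypothetical verification script, not part of the patch above.
# Mirrors queries.js using pymongo against the collection filled by the pipeline.
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")
collection = client["quotestoscrape"]["pauloandre_limaflores"]

# How many quotes were collected?
print("quotes:", collection.count_documents({}))

# How many distinct tags were collected?
print("distinct tags:", len(collection.distinct("tags")))

# How many quotes were collected per author?
per_author = collection.aggregate(
    [{"$group": {"_id": "$author.name", "total": {"$sum": 1}}}]
)
for row in per_author:
    print(row["_id"], row["total"])

client.close()
```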