Commit 25bf4c2

Fill with the results of following the tutorial

Gallaecio committed Nov 23, 2023
1 parent 98fd0a1 commit 25bf4c2

Showing 16 changed files with 466 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -0,0 +1,3 @@
/books.csv
/quotes.csv
/tutorial-env/
54 changes: 54 additions & 0 deletions README.rst
@@ -6,3 +6,57 @@ Scrapy_ project built following `Zyte’s web scraping tutorial`_.

.. _Scrapy: https://scrapy.org/
.. _Zyte’s web scraping tutorial: https://docs.zyte.com/web-scraping/tutorial/index.html

Requirements
============

Python 3.8 or higher.


Setup
=====

To be able to use this project, you must first:

#. Create a Python virtual environment.

   - On **Windows**:

     .. code-block:: shell

        python3 -m venv tutorial-env
        tutorial-env\Scripts\activate.bat

   - On **macOS** and **Linux**:

     .. code-block:: shell

        python3 -m venv tutorial-env
        . tutorial-env/bin/activate

#. Install the project requirements:

   .. code-block:: shell

      pip install --upgrade -r requirements.txt

#. To be able to deploy to `Scrapy Cloud`_, copy your `Scrapy Cloud API key`_,
   run ``shub login`` and, when prompted, paste your API key and press Enter.

   .. _Scrapy Cloud: https://docs.zyte.com/scrapy-cloud/get-started.html
   .. _Scrapy Cloud API key: https://app.zyte.com/o/settings/apikey

#. To be able to use `Zyte API`_, append the following line to
   ``tutorial/settings.py``, replacing ``YOUR_API_KEY`` with your `Zyte API
   key`_:

   .. code-block:: python

      ZYTE_API_KEY = "YOUR_API_KEY"

   .. _Zyte API: https://docs.zyte.com/zyte-api/get-started.html
   .. _Zyte API key: https://app.zyte.com/o/zyte-api/api-access

   .. tip:: For local development, you can alternatively use an environment
      variable with that name. In Scrapy Cloud, the ``ZYTE_API_KEY`` setting
      will be automatically defined with your Zyte API key.
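
As an illustrative sketch of the environment-variable approach mentioned in the
tip (not one of the committed files), ``tutorial/settings.py`` could read the
key from the environment, assuming ``ZYTE_API_KEY`` has already been exported
in your shell:

.. code-block:: python

   import os

   # Read the key from the environment so it never has to be committed.
   ZYTE_API_KEY = os.environ.get("ZYTE_API_KEY")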
4 changes: 4 additions & 0 deletions requirements.txt
@@ -0,0 +1,4 @@
scrapy
scrapy-zyte-api
shub
zyte-spider-templates
4 changes: 4 additions & 0 deletions scrapinghub.yml
@@ -0,0 +1,4 @@
requirements:
  file: requirements.txt
stacks:
  default: scrapy:2.11
11 changes: 11 additions & 0 deletions scrapy.cfg
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = tutorial.settings

[deploy]
#url = http://localhost:6800/
project = tutorial
Empty file added tutorial/__init__.py
Empty file.
12 changes: 12 additions & 0 deletions tutorial/items.py
@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class TutorialItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
103 changes: 103 additions & 0 deletions tutorial/middlewares.py
@@ -0,0 +1,103 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class TutorialSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class TutorialDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)
13 changes: 13 additions & 0 deletions tutorial/pipelines.py
@@ -0,0 +1,13 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class TutorialPipeline:
    def process_item(self, item, spider):
        return item
121 changes: 121 additions & 0 deletions tutorial/settings.py
@@ -0,0 +1,121 @@
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "tutorial"

SPIDER_MODULES = [
    "tutorial.spiders",
    "zyte_spider_templates.spiders",
]
NEWSPIDER_MODULE = "tutorial.spiders"


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = "tutorial (+http://www.yourdomain.com)"

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# "tutorial.middlewares.TutorialSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# "tutorial.middlewares.TutorialDownloaderMiddleware": 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# "tutorial.pipelines.TutorialPipeline": 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"

# Custom settings
DOWNLOAD_HANDLERS = {
    "http": "scrapy_zyte_api.ScrapyZyteAPIDownloadHandler",
    "https": "scrapy_zyte_api.ScrapyZyteAPIDownloadHandler",
}
DOWNLOADER_MIDDLEWARES = {
    "scrapy_poet.InjectionMiddleware": 543,
    "scrapy_zyte_api.ScrapyZyteAPIDownloaderMiddleware": 1000,
}
REQUEST_FINGERPRINTER_CLASS = "scrapy_zyte_api.ScrapyZyteAPIRequestFingerprinter"
ZYTE_API_TRANSPARENT_MODE = True
SPIDER_MIDDLEWARES = {
    "scrapy_poet.RetryMiddleware": 275,
    "zyte_spider_templates.middlewares.CrawlingLogsMiddleware": 1000,
}
SCRAPY_POET_DISCOVER = [
    "zyte_spider_templates.page_objects",
]
SCRAPY_POET_PROVIDERS = {
    "scrapy_zyte_api.providers.ZyteApiProvider": 1100,
}
CLOSESPIDER_TIMEOUT_NO_ITEM = 600
SCHEDULER_DISK_QUEUE = "scrapy.squeues.PickleFifoDiskQueue"
SCHEDULER_MEMORY_QUEUE = "scrapy.squeues.FifoMemoryQueue"
4 changes: 4 additions & 0 deletions tutorial/spiders/__init__.py
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
21 changes: 21 additions & 0 deletions tutorial/spiders/books_toscrape_com.py
@@ -0,0 +1,21 @@
from scrapy import Spider


class BooksToScrapeComSpider(Spider):
    name = "books_toscrape_com"
    start_urls = [
        "http://books.toscrape.com/catalogue/category/books/mystery_3/index.html"
    ]

    def parse(self, response):
        next_page_links = response.css(".next a")
        yield from response.follow_all(next_page_links)
        book_links = response.css("article a")
        yield from response.follow_all(book_links, callback=self.parse_book)

    def parse_book(self, response):
        yield {
            "name": response.css("h1::text").get(),
            "price": response.css(".price_color::text").re_first("£(.*)"),
            "url": response.url,
        }
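
This spider is normally run with ``scrapy crawl books_toscrape_com -O books.csv``.
As a minimal, illustrative sketch (not one of the committed files), it can also
be launched from a standalone script via Scrapy's ``CrawlerProcess``, assuming
the script is run from the project root so that ``tutorial/settings.py`` is
picked up:

.. code-block:: python

   from scrapy.crawler import CrawlerProcess
   from scrapy.utils.project import get_project_settings

   from tutorial.spiders.books_toscrape_com import BooksToScrapeComSpider

   # Load the project settings from tutorial/settings.py.
   settings = get_project_settings()
   # Export scraped items to books.csv (the file listed in .gitignore).
   settings.set("FEEDS", {"books.csv": {"format": "csv", "overwrite": True}})

   process = CrawlerProcess(settings)
   process.crawl(BooksToScrapeComSpider)
   process.start()  # blocks until the crawl finishes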
19 changes: 19 additions & 0 deletions tutorial/spiders/books_toscrape_com_extract.py
@@ -0,0 +1,19 @@
from scrapy import Spider


class BooksToScrapeComExtractSpider(Spider):
    name = "books_toscrape_com_extract"
    start_urls = [
        "http://books.toscrape.com/catalogue/category/books/mystery_3/index.html"
    ]

    def parse(self, response):
        next_page_links = response.css(".next a")
        yield from response.follow_all(next_page_links)
        book_links = response.css("article a")
        for request in response.follow_all(book_links, callback=self.parse_book):
            request.meta["zyte_api_automap"] = {"product": True}
            yield request

    def parse_book(self, response):
        yield response.raw_api_response["product"]
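
Here ``parse_book`` yields the automatically extracted product exactly as
returned by Zyte API. As a minimal, illustrative sketch (not one of the
committed files), the callback could instead keep only the fields used by the
manual spider, assuming the product payload exposes ``name``, ``price`` and
``url`` keys:

.. code-block:: python

   def parse_book(self, response):
       # "product" is present because the request set
       # zyte_api_automap={"product": True}.
       product = response.raw_api_response["product"]
       yield {
           "name": product.get("name"),
           "price": product.get("price"),
           "url": product.get("url"),
       }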