aboutsummaryrefslogtreecommitdiff
path: root/web_grater
diff options
context:
space:
mode:
authorCody Hiar <cody@hiar.ca>2021-03-21 13:14:53 -0600
committerCody Hiar <cody@hiar.ca>2021-03-21 13:14:53 -0600
commit03870c0eca405cd8c08253c8b07b93a9a9fadc44 (patch)
tree985df08b591a5a32cc92830e1402f97dbc6d122f /web_grater
Initial commitHEADmaster
Diffstat (limited to 'web_grater')
-rw-r--r--web_grater/scrapy.cfg11
-rw-r--r--web_grater/web_grater/__init__.py0
-rw-r--r--web_grater/web_grater/items.py14
-rw-r--r--web_grater/web_grater/middlewares.py103
-rw-r--r--web_grater/web_grater/pipelines.py11
-rw-r--r--web_grater/web_grater/settings.py93
-rw-r--r--web_grater/web_grater/spiders/__init__.py4
-rw-r--r--web_grater/web_grater/spiders/grater_spider.py66
8 files changed, 302 insertions, 0 deletions
diff --git a/web_grater/scrapy.cfg b/web_grater/scrapy.cfg
new file mode 100644
index 0000000..fd9b643
--- /dev/null
+++ b/web_grater/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = web_grater.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = web_grater
diff --git a/web_grater/web_grater/__init__.py b/web_grater/web_grater/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/web_grater/web_grater/__init__.py
diff --git a/web_grater/web_grater/items.py b/web_grater/web_grater/items.py
new file mode 100644
index 0000000..3e08b63
--- /dev/null
+++ b/web_grater/web_grater/items.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class WebGraterItem(scrapy.Item):
+ # define the fields for your item here like:
+ # name = scrapy.Field()
+ pass
diff --git a/web_grater/web_grater/middlewares.py b/web_grater/web_grater/middlewares.py
new file mode 100644
index 0000000..6f57985
--- /dev/null
+++ b/web_grater/web_grater/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class WebGraterSpiderMiddleware(object):
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the spider middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_spider_input(self, response, spider):
+ # Called for each response that goes through the spider
+ # middleware and into the spider.
+
+ # Should return None or raise an exception.
+ return None
+
+ def process_spider_output(self, response, result, spider):
+ # Called with the results returned from the Spider, after
+ # it has processed the response.
+
+ # Must return an iterable of Request, dict or Item objects.
+ for i in result:
+ yield i
+
+ def process_spider_exception(self, response, exception, spider):
+ # Called when a spider or process_spider_input() method
+ # (from other spider middleware) raises an exception.
+
+ # Should return either None or an iterable of Response, dict
+ # or Item objects.
+ pass
+
+ def process_start_requests(self, start_requests, spider):
+ # Called with the start requests of the spider, and works
+ # similarly to the process_spider_output() method, except
+ # that it doesn’t have a response associated.
+
+ # Must return only requests (not items).
+ for r in start_requests:
+ yield r
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class WebGraterDownloaderMiddleware(object):
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the downloader middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_request(self, request, spider):
+ # Called for each request that goes through the downloader
+ # middleware.
+
+ # Must either:
+ # - return None: continue processing this request
+ # - or return a Response object
+ # - or return a Request object
+ # - or raise IgnoreRequest: process_exception() methods of
+ # installed downloader middleware will be called
+ return None
+
+ def process_response(self, request, response, spider):
+ # Called with the response returned from the downloader.
+
+        # Must either:
+ # - return a Response object
+ # - return a Request object
+ # - or raise IgnoreRequest
+ return response
+
+ def process_exception(self, request, exception, spider):
+ # Called when a download handler or a process_request()
+ # (from other downloader middleware) raises an exception.
+
+ # Must either:
+ # - return None: continue processing this exception
+ # - return a Response object: stops process_exception() chain
+ # - return a Request object: stops process_exception() chain
+ pass
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/web_grater/web_grater/pipelines.py b/web_grater/web_grater/pipelines.py
new file mode 100644
index 0000000..849e1d1
--- /dev/null
+++ b/web_grater/web_grater/pipelines.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+class WebGraterPipeline(object):
+ def process_item(self, item, spider):
+ return item
diff --git a/web_grater/web_grater/settings.py b/web_grater/web_grater/settings.py
new file mode 100644
index 0000000..3dbd0ad
--- /dev/null
+++ b/web_grater/web_grater/settings.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for web_grater project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+# https://doc.scrapy.org/en/latest/topics/settings.html
+# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'web_grater'
+
+SPIDER_MODULES = ['web_grater.spiders']
+NEWSPIDER_MODULE = 'web_grater.spiders'
+
+LOG_LEVEL = 'INFO'
+
+HTTPERROR_ALLOW_ALL = True
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'web_grater (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+# 'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# 'web_grater.middlewares.WebGraterSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+# 'web_grater.middlewares.WebGraterDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# 'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+# 'web_grater.pipelines.WebGraterPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/web_grater/web_grater/spiders/__init__.py b/web_grater/web_grater/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/web_grater/web_grater/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/web_grater/web_grater/spiders/grater_spider.py b/web_grater/web_grater/spiders/grater_spider.py
new file mode 100644
index 0000000..a8780d6
--- /dev/null
+++ b/web_grater/web_grater/spiders/grater_spider.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""Grater Spider for parsing urls."""
+from typing import Generator
+
+from blessings import Terminal
+from scrapy.http import Request
+from scrapy.http.response.html import HtmlResponse
+from scrapy.linkextractors import LinkExtractor
+from scrapy.spiders import Spider
+
+t = Terminal()
+
+# The site you want to crawl
+WEBSITE = "www.example.com"
+
+
+class CustomLinkExtractor(LinkExtractor):
+ """Custom extractor for checking results."""
+
+ def extract_links(self, response):
+ """Get links from a page.
+
+ If you run the scraper and find that there is a url that is broken
+ and you want to know which page it was on you can uncomment and upate
+ the term below so that it will print pages containing that link in
+ yellow so you can find them
+ """
+ links = super().extract_links(response)
+ # for link in links:
+ # if 'insert-search-term-here' in link.url:
+ # print(f'{t.yellow}200 Page: {response.url}{t.normal}')
+ return links
+
+
+class GraterSpider(Spider):
+ """Grater Spider."""
+
+ name = "grater"
+ allowed_domains = [f"{WEBSITE}"]
+ start_urls = [f"https://{WEBSITE}"]
+
+ def __init__(self, *args, **kwargs):
+ """Init custom link extractor."""
+ self.link_extractor = CustomLinkExtractor()
+ return super().__init__(*args, **kwargs)
+
+ def parse(self, response: HtmlResponse) -> Generator:
+ """Parse a page."""
+ if response.status == 200:
+ print(f"{t.green}200 Page: {response.url}{t.normal}")
+ else:
+ print(f"{t.red}{response.status} Page: {response.url}{t.normal}")
+ # Parse all links on the current page
+ for link in self.link_extractor.extract_links(response):
+ yield Request(link.url, callback=self.parse)
+ # Parse all the images
+ img_urls = response.css("img::attr(src)").extract()
+ for img_url in img_urls:
+ yield response.follow(img_url, callback=self.parse_image_request)
+
+ def parse_image_request(self, response: HtmlResponse) -> None:
+ """Parse the request for an image."""
+ if response.status == 200:
+ print(f"{t.green}200 Image: {response.url}{t.normal}")
+ else:
+ print(f"{t.red}{response.status} Image: {response.url}{t.normal}")