author    Cody Hiar <cody@hiar.ca>    2021-03-21 13:14:53 -0600
committer Cody Hiar <cody@hiar.ca>    2021-03-21 13:14:53 -0600
commit    03870c0eca405cd8c08253c8b07b93a9a9fadc44 (patch)
tree      985df08b591a5a32cc92830e1402f97dbc6d122f

Initial commit (HEAD, master)
 .gitignore                                     |  12
 Makefile                                       |  23
 README.md                                      |  24
 docker-compose.yml                             |  14
 docker/Dockerfile                              |  17
 requirements.txt                               |   2
 web_grater/scrapy.cfg                          |  11
 web_grater/web_grater/__init__.py              |   0
 web_grater/web_grater/items.py                 |  14
 web_grater/web_grater/middlewares.py           | 103
 web_grater/web_grater/pipelines.py             |  11
 web_grater/web_grater/settings.py              |  93
 web_grater/web_grater/spiders/__init__.py      |   4
 web_grater/web_grater/spiders/grater_spider.py |  66
 14 files changed, 394 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a57a9ed
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+.*.swp
+*.pyc
+*.pyo
+.DS_Store
+tags
+.ropeproject
+*.actual
+.vimcache
+.idea
+
+docker/requirements.txt
+.mypy_cache
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..8a84b00
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,23 @@
+.PHONY: build
+
+help:
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+build: ## Build the Docker image
+ cp requirements.txt docker/requirements.txt
+ docker-compose -p web_grater build
+
+up: build ## Bring the container up
+ docker-compose -p web_grater up -d
+
+down: ## Stop the container
+ docker-compose -p web_grater stop
+
+enter: ## Enter the running container
+ docker-compose -p web_grater exec backend /bin/bash
+
+clean: down ## Remove stopped containers
+ docker-compose -p web_grater rm
+
+crawl: ## Execute a crawl against a site
+ docker-compose -p web_grater exec backend /bin/bash -c 'cd web_grater && scrapy crawl grater'
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..411a8d8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,24 @@
+# Web Grater
+
+A very simple Scrapy app for finding broken hrefs/images on your website. I've
+used this on a number of websites to help find broken content, and hopefully it
+can help you too.
+
+## How do I use this?
+
+The project is dockerized, but if you don't want to use Docker you can use a
+virtualenv instead. First, update the site you want to crawl in
+`web_grater/spiders/grater_spider.py`. After that you can just run:
+
+```
+make up && make crawl
+```
+
+This will print all the URLs the scraper can find, with the broken ones shown
+in red.
+
+## How do I find which page contains the broken link?
+
+In `grater_spider.py` there is a `CustomLinkExtractor` with a method called
+`extract_links`. It contains some commented-out code showing how to search for
+a broken link during link extraction and print the parent page in yellow.
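For reference, here is a minimal sketch of what `CustomLinkExtractor` in `grater_spider.py` (shown later in this diff) looks like once that commented-out search is enabled; `insert-search-term-here` is the placeholder from that file and should be swapped for part of the broken URL you saw in red:

```python
# Sketch only: CustomLinkExtractor with the commented-out search enabled.
# Replace 'insert-search-term-here' with part of the broken URL seen in red.
from blessings import Terminal
from scrapy.linkextractors import LinkExtractor

t = Terminal()


class CustomLinkExtractor(LinkExtractor):
    """Custom extractor for checking results."""

    def extract_links(self, response):
        """Get links from a page, flagging pages that hold the broken link."""
        links = super().extract_links(response)
        for link in links:
            if "insert-search-term-here" in link.url:
                # The parent page containing the suspect link prints in yellow
                print(f"{t.yellow}200 Page: {response.url}{t.normal}")
        return links
```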
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..1690296
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,14 @@
+version: '3'
+services:
+ backend:
+ build: ./docker/
+ network_mode: "bridge"
+ container_name: web_grater
+ image: thornycrackers/web_grater
+ ports:
+ - "8000"
+ volumes:
+ - .:/usr/src/app
+ command: /bin/bash
+ tty: true
+ stdin_open: true
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..22e6328
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,17 @@
+FROM python:3.6
+
+# Set the locale of the container
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y locales
+RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
+ dpkg-reconfigure --frontend=noninteractive locales && \
+ update-locale LANG=en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+# Install dependencies
+COPY requirements.txt /opt/requirements.txt
+RUN pip3 install -r /opt/requirements.txt
+
+# The code is stored here
+WORKDIR /usr/src/app
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..4cf21c6
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+Scrapy==2.4.0
+blessings==1.6.1
diff --git a/web_grater/scrapy.cfg b/web_grater/scrapy.cfg
new file mode 100644
index 0000000..fd9b643
--- /dev/null
+++ b/web_grater/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = web_grater.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = web_grater
diff --git a/web_grater/web_grater/__init__.py b/web_grater/web_grater/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/web_grater/web_grater/__init__.py
diff --git a/web_grater/web_grater/items.py b/web_grater/web_grater/items.py
new file mode 100644
index 0000000..3e08b63
--- /dev/null
+++ b/web_grater/web_grater/items.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class WebGraterItem(scrapy.Item):
+ # define the fields for your item here like:
+ # name = scrapy.Field()
+ pass
diff --git a/web_grater/web_grater/middlewares.py b/web_grater/web_grater/middlewares.py
new file mode 100644
index 0000000..6f57985
--- /dev/null
+++ b/web_grater/web_grater/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class WebGraterSpiderMiddleware(object):
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the spider middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_spider_input(self, response, spider):
+ # Called for each response that goes through the spider
+ # middleware and into the spider.
+
+ # Should return None or raise an exception.
+ return None
+
+ def process_spider_output(self, response, result, spider):
+ # Called with the results returned from the Spider, after
+ # it has processed the response.
+
+ # Must return an iterable of Request, dict or Item objects.
+ for i in result:
+ yield i
+
+ def process_spider_exception(self, response, exception, spider):
+ # Called when a spider or process_spider_input() method
+ # (from other spider middleware) raises an exception.
+
+ # Should return either None or an iterable of Response, dict
+ # or Item objects.
+ pass
+
+ def process_start_requests(self, start_requests, spider):
+ # Called with the start requests of the spider, and works
+ # similarly to the process_spider_output() method, except
+ # that it doesn’t have a response associated.
+
+ # Must return only requests (not items).
+ for r in start_requests:
+ yield r
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class WebGraterDownloaderMiddleware(object):
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the downloader middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_request(self, request, spider):
+ # Called for each request that goes through the downloader
+ # middleware.
+
+ # Must either:
+ # - return None: continue processing this request
+ # - or return a Response object
+ # - or return a Request object
+ # - or raise IgnoreRequest: process_exception() methods of
+ # installed downloader middleware will be called
+ return None
+
+ def process_response(self, request, response, spider):
+ # Called with the response returned from the downloader.
+
+ # Must either;
+ # - return a Response object
+ # - return a Request object
+ # - or raise IgnoreRequest
+ return response
+
+ def process_exception(self, request, exception, spider):
+ # Called when a download handler or a process_request()
+ # (from other downloader middleware) raises an exception.
+
+ # Must either:
+ # - return None: continue processing this exception
+ # - return a Response object: stops process_exception() chain
+ # - return a Request object: stops process_exception() chain
+ pass
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/web_grater/web_grater/pipelines.py b/web_grater/web_grater/pipelines.py
new file mode 100644
index 0000000..849e1d1
--- /dev/null
+++ b/web_grater/web_grater/pipelines.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+class WebGraterPipeline(object):
+ def process_item(self, item, spider):
+ return item
diff --git a/web_grater/web_grater/settings.py b/web_grater/web_grater/settings.py
new file mode 100644
index 0000000..3dbd0ad
--- /dev/null
+++ b/web_grater/web_grater/settings.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for web_grater project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+# https://doc.scrapy.org/en/latest/topics/settings.html
+# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'web_grater'
+
+SPIDER_MODULES = ['web_grater.spiders']
+NEWSPIDER_MODULE = 'web_grater.spiders'
+
+LOG_LEVEL = 'INFO'
+
+HTTPERROR_ALLOW_ALL = True
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'web_grater (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+# 'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# 'web_grater.middlewares.WebGraterSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+# 'web_grater.middlewares.WebGraterDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# 'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+# 'web_grater.pipelines.WebGraterPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/web_grater/web_grater/spiders/__init__.py b/web_grater/web_grater/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/web_grater/web_grater/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/web_grater/web_grater/spiders/grater_spider.py b/web_grater/web_grater/spiders/grater_spider.py
new file mode 100644
index 0000000..a8780d6
--- /dev/null
+++ b/web_grater/web_grater/spiders/grater_spider.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""Grater Spider for parsing urls."""
+from typing import Generator
+
+from blessings import Terminal
+from scrapy.http import Request, Response
+from scrapy.http.response.html import HtmlResponse
+from scrapy.linkextractors import LinkExtractor
+from scrapy.spiders import Spider
+
+t = Terminal()
+
+# The site you want to crawl
+WEBSITE = "www.example.com"
+
+
+class CustomLinkExtractor(LinkExtractor):
+ """Custom extractor for checking results."""
+
+ def extract_links(self, response):
+ """Get links from a page.
+
+        If you run the scraper and find a broken url and want to know which
+        page it was on, you can uncomment and update the search term below so
+        that pages containing that link are printed in yellow, making them
+        easy to find.
+ """
+ links = super().extract_links(response)
+ # for link in links:
+ # if 'insert-search-term-here' in link.url:
+ # print(f'{t.yellow}200 Page: {response.url}{t.normal}')
+ return links
+
+
+class GraterSpider(Spider):
+ """Grater Spider."""
+
+ name = "grater"
+    allowed_domains = [WEBSITE]
+ start_urls = [f"https://{WEBSITE}"]
+
+ def __init__(self, *args, **kwargs):
+ """Init custom link extractor."""
+ self.link_extractor = CustomLinkExtractor()
+ return super().__init__(*args, **kwargs)
+
+ def parse(self, response: HtmlResponse) -> Generator:
+ """Parse a page."""
+ if response.status == 200:
+ print(f"{t.green}200 Page: {response.url}{t.normal}")
+ else:
+ print(f"{t.red}{response.status} Page: {response.url}{t.normal}")
+ # Parse all links on the current page
+ for link in self.link_extractor.extract_links(response):
+ yield Request(link.url, callback=self.parse)
+ # Parse all the images
+ img_urls = response.css("img::attr(src)").extract()
+ for img_url in img_urls:
+ yield response.follow(img_url, callback=self.parse_image_request)
+
+    def parse_image_request(self, response: Response) -> None:
+ """Parse the request for an image."""
+ if response.status == 200:
+ print(f"{t.green}200 Image: {response.url}{t.normal}")
+ else:
+ print(f"{t.red}{response.status} Image: {response.url}{t.normal}")