From 3f5efcb91afc2e6d013800132b92e4a6c297f662 Mon Sep 17 00:00:00 2001
From: Cody Hiar
Date: Mon, 19 Mar 2018 21:30:32 -0600
Subject: Initial commit of working files

---
 .gitignore                                |  10 +++
 Makefile                                  |  23 +++++++
 build/Dockerfile                          |  34 ++++++++++
 build/entry.sh                            |   3 +
 build/requirements.txt                    |   2 +
 myproject/myproject/__init__.py           |   0
 myproject/myproject/items.py              |  19 ++++++
 myproject/myproject/middlewares.py        | 103 ++++++++++++++++++++++++++++++
 myproject/myproject/pipelines.py          |  11 ++++
 myproject/myproject/settings.py           |  91 ++++++++++++++++++++++++++
 myproject/myproject/spiders/__init__.py   |   4 ++
 myproject/myproject/spiders/blogitems.py  |  32 ++++++++++
 myproject/myproject/spiders/linkfinder.py |  18 ++++++
 myproject/myproject/spiders/shell.py      |  13 ++++
 myproject/scrapy.cfg                      |  11 ++++
 requirements.txt                          |   2 +
 16 files changed, 376 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 Makefile
 create mode 100644 build/Dockerfile
 create mode 100644 build/entry.sh
 create mode 100644 build/requirements.txt
 create mode 100644 myproject/myproject/__init__.py
 create mode 100644 myproject/myproject/items.py
 create mode 100644 myproject/myproject/middlewares.py
 create mode 100644 myproject/myproject/pipelines.py
 create mode 100644 myproject/myproject/settings.py
 create mode 100644 myproject/myproject/spiders/__init__.py
 create mode 100644 myproject/myproject/spiders/blogitems.py
 create mode 100644 myproject/myproject/spiders/linkfinder.py
 create mode 100644 myproject/myproject/spiders/shell.py
 create mode 100644 myproject/scrapy.cfg
 create mode 100644 requirements.txt

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..db02cd5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+.*.swp
+*.pyc
+*.pyo
+.DS_Store
+tags
+.ropeproject
+*.actual
+.vimcache
+.idea
+.mypy_cache
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..902cb2f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,23 @@
+.PHONY: build
+
+CONTAINERNAME=thornycrackers_scrapy_example
+IMAGENAME=thornycrackers/scrapy_example
+
+help:
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+build: ## Build the Docker image
+	cp -r requirements.txt ./build
+	docker build -t $(IMAGENAME) ./build
+
+up: build ## Bring the container up
+	docker run -dP -v $(CURDIR):/app --name $(CONTAINERNAME) $(IMAGENAME) /bin/bash -c '/opt/entry.sh'
+
+down: ## Stop the container
+	docker stop $(CONTAINERNAME) || echo 'No container to stop'
+
+enter: ## Enter the running container
+	docker exec -it $(CONTAINERNAME) /bin/bash
+
+clean: down ## Remove the image and any stopped containers
+	docker rm $(CONTAINERNAME) || echo 'No container to remove'
diff --git a/build/Dockerfile b/build/Dockerfile
new file mode 100644
index 0000000..adfade3
--- /dev/null
+++ b/build/Dockerfile
@@ -0,0 +1,34 @@
+FROM ubuntu:16.04
+MAINTAINER Cody Hiar
+
+# Set a term for terminal inside the container, can't clear without it
+ENV TERM screen-256color
+ENV DEBIAN_FRONTEND noninteractive
+
+
+# Update and install
+RUN apt-get update && apt-get install -y \
+    wget \
+    python3-dev \
+    python3-pip \
+    vim \
+    locales
+
+# Add the project requirements
+ADD requirements.txt /opt/requirements.txt
+
+# Install the requirements
+RUN /bin/bash -c 'cd /opt && pip3 install -r requirements.txt'
+
+# Generally a good idea to have these, extensions sometimes need them
+RUN locale-gen en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+# The code should be symlinked to this directory
+WORKDIR /app
+
+# Create the entry script
+ADD entry.sh /opt/
+RUN chmod 755 /opt/entry.sh
diff --git a/build/entry.sh b/build/entry.sh
new file mode 100644
index 0000000..09a943f
--- /dev/null
+++ b/build/entry.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+while true; do echo hi; sleep 1; done;
diff --git a/build/requirements.txt b/build/requirements.txt
new file mode 100644
index 0000000..56ea79c
--- /dev/null
+++ b/build/requirements.txt
@@ -0,0 +1,2 @@
+scrapy==1.5.0
+ipython==6.0.0
diff --git a/myproject/myproject/__init__.py b/myproject/myproject/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/myproject/myproject/items.py b/myproject/myproject/items.py
new file mode 100644
index 0000000..99e70ed
--- /dev/null
+++ b/myproject/myproject/items.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class BlogPostItem(scrapy.Item):
+    # define the fields for your item here like:
+    title = scrapy.Field()
+    year = scrapy.Field()
+    date = scrapy.Field()
+    pass
+
+    def __repr__(self):
+        return "{date} {year} - {title}".format(**self)
diff --git a/myproject/myproject/middlewares.py b/myproject/myproject/middlewares.py
new file mode 100644
index 0000000..5c0f3e3
--- /dev/null
+++ b/myproject/myproject/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class MyprojectSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class MyprojectDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either;
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/myproject/myproject/pipelines.py b/myproject/myproject/pipelines.py
new file mode 100644
index 0000000..0bdee9f
--- /dev/null
+++ b/myproject/myproject/pipelines.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+class MyprojectPipeline(object):
+    def process_item(self, item, spider):
+        return item
diff --git a/myproject/myproject/settings.py b/myproject/myproject/settings.py
new file mode 100644
index 0000000..6790f78
--- /dev/null
+++ b/myproject/myproject/settings.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for myproject project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://doc.scrapy.org/en/latest/topics/settings.html
+#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'myproject'
+
+SPIDER_MODULES = ['myproject.spiders']
+NEWSPIDER_MODULE = 'myproject.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'myproject (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+LOG_LEVEL = 'WARNING'
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'myproject.middlewares.MyprojectSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'myproject.middlewares.MyprojectDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'myproject.pipelines.MyprojectPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/myproject/myproject/spiders/__init__.py b/myproject/myproject/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/myproject/myproject/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/myproject/myproject/spiders/blogitems.py b/myproject/myproject/spiders/blogitems.py
new file mode 100644
index 0000000..805681b
--- /dev/null
+++ b/myproject/myproject/spiders/blogitems.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+import scrapy
+
+from ..items import BlogPostItem
+
+
+class BlogItemsSpider(scrapy.Spider):
+    name = 'blogitems'
+    allowed_domains = ['codyhiar.com']
+    start_urls = ['http://codyhiar.com/']
+
+    def __init__(self, tag=None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.start_urls = ['https://www.codyhiar.com/tags/{}'.format(tag)]
+
+    def parse(self, response):
+        # from scrapy.shell import inspect_response
+        # inspect_response(response, self)
+        year_archives = response.css('.archive')
+        for year_archive in year_archives:
+            year = year_archive.css('h3::text').extract_first()
+            posts = year_archive.css('.post-item')
+            for post in posts:
+                title = post.css('a::text').extract_first().strip()
+                date = post.css('.post-time::text').extract_first()
+                blog_post = BlogPostItem(
+                    title=title,
+                    date=date,
+                    year=year
+                )
+                print(blog_post)
+
diff --git a/myproject/myproject/spiders/linkfinder.py b/myproject/myproject/spiders/linkfinder.py
new file mode 100644
index 0000000..04c057b
--- /dev/null
+++ b/myproject/myproject/spiders/linkfinder.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+import scrapy
+from scrapy.spiders import CrawlSpider, Rule
+from scrapy.linkextractors import LinkExtractor
+
+
+class LinkFinderSpider(scrapy.spiders.CrawlSpider):
+    name = 'linkfinder'
+    allowed_domains = ['www.codyhiar.com']
+    start_urls = ['https://codyhiar.com/']
+
+    rules = (Rule(LinkExtractor(allow=()), process_links='print_internal_links'),)
+
+    def print_internal_links(self, links):
+        for link in links:
+            if 'codyhiar.com' in link.url:
+                print(link.url)
+        return links
diff --git a/myproject/myproject/spiders/shell.py b/myproject/myproject/spiders/shell.py
new file mode 100644
index 0000000..a342921
--- /dev/null
+++ b/myproject/myproject/spiders/shell.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+"""Invoke the scrapy shell from the spider."""
+import scrapy
+
+
+class ShellSpider(scrapy.Spider):
+    name = 'shell'
+    allowed_domains = ['codyhiar.com']
+    start_urls = ['http://codyhiar.com/']
+
+    def parse(self, response):
+        from scrapy.shell import inspect_response
+        inspect_response(response, self)
diff --git a/myproject/scrapy.cfg b/myproject/scrapy.cfg
new file mode 100644
index 0000000..86af274
--- /dev/null
+++ b/myproject/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = myproject.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = myproject
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..251c547
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+scrapy==1.5.0
+ipython==6.2.1
--
cgit v1.2.3
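Usage note (a minimal sketch, not part of the commit itself): based on the Makefile targets and the spider names introduced in this patch, a typical workflow would look roughly like the lines below; the tag value passed to the blogitems spider is a placeholder, not something defined in this commit.

    make up                                # build the image and start the container with the repo mounted at /app
    make enter                             # open a shell inside the running container
    cd /app/myproject                      # scrapy.cfg lives here
    scrapy crawl blogitems -a tag=python   # 'python' is a hypothetical tag slug
    scrapy crawl linkfinder                # print internal links found by the CrawlSpider
    exit
    make down                              # stop the container when finished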