author     Cody Hiar <codyfh@gmail.com>  2018-03-19 21:30:32 -0600
committer  Cody Hiar <codyfh@gmail.com>  2018-03-19 21:30:32 -0600
commit     3f5efcb91afc2e6d013800132b92e4a6c297f662 (patch)
tree       3de0b3c6804ec51d5784d788b8df450eae208f1a /myproject
Initial commit of working files
Diffstat (limited to 'myproject')
-rw-r--r--  myproject/myproject/__init__.py              0
-rw-r--r--  myproject/myproject/items.py                19
-rw-r--r--  myproject/myproject/middlewares.py         103
-rw-r--r--  myproject/myproject/pipelines.py            11
-rw-r--r--  myproject/myproject/settings.py             91
-rw-r--r--  myproject/myproject/spiders/__init__.py      4
-rw-r--r--  myproject/myproject/spiders/blogitems.py    32
-rw-r--r--  myproject/myproject/spiders/linkfinder.py   18
-rw-r--r--  myproject/myproject/spiders/shell.py        13
-rw-r--r--  myproject/scrapy.cfg                        11
10 files changed, 302 insertions, 0 deletions
diff --git a/myproject/myproject/__init__.py b/myproject/myproject/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/myproject/myproject/__init__.py
diff --git a/myproject/myproject/items.py b/myproject/myproject/items.py
new file mode 100644
index 0000000..99e70ed
--- /dev/null
+++ b/myproject/myproject/items.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class BlogPostItem(scrapy.Item):
+    # define the fields for your item here like:
+    title = scrapy.Field()
+    year = scrapy.Field()
+    date = scrapy.Field()
+    pass
+
+    def __repr__(self):
+        return "{date} {year} - {title}".format(**self)
diff --git a/myproject/myproject/middlewares.py b/myproject/myproject/middlewares.py
new file mode 100644
index 0000000..5c0f3e3
--- /dev/null
+++ b/myproject/myproject/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class MyprojectSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn't have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class MyprojectDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
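
None of these template middlewares are enabled in settings.py (the SPIDER_MIDDLEWARES and
DOWNLOADER_MIDDLEWARES dicts there are left commented out), so they never run. As an
illustration only, a downloader middleware that actually does something might stamp every
outgoing request (hypothetical class, not part of the commit):

    class StampRequestMiddleware(object):
        """Add an identifying header to every request (illustrative sketch)."""

        def process_request(self, request, spider):
            request.headers['X-Crawled-By'] = spider.name
            return None  # fall through to the rest of the middleware chain
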
diff --git a/myproject/myproject/pipelines.py b/myproject/myproject/pipelines.py
new file mode 100644
index 0000000..0bdee9f
--- /dev/null
+++ b/myproject/myproject/pipelines.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+class MyprojectPipeline(object):
+    def process_item(self, item, spider):
+        return item
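
The generated pipeline simply passes items through and is not activated in ITEM_PIPELINES.
As a sketch of what a real one could do (hypothetical class, not part of the commit), a
pipeline might drop blog posts that were scraped without a title:

    from scrapy.exceptions import DropItem


    class RequireTitlePipeline(object):
        """Discard any item that is missing a title (illustrative sketch)."""

        def process_item(self, item, spider):
            if not item.get('title'):
                raise DropItem('Missing title in {!r}'.format(item))
            return item
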
diff --git a/myproject/myproject/settings.py b/myproject/myproject/settings.py
new file mode 100644
index 0000000..6790f78
--- /dev/null
+++ b/myproject/myproject/settings.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for myproject project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+# https://doc.scrapy.org/en/latest/topics/settings.html
+# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'myproject'
+
+SPIDER_MODULES = ['myproject.spiders']
+NEWSPIDER_MODULE = 'myproject.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'myproject (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+LOG_LEVEL = 'WARNING'
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+# 'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# 'myproject.middlewares.MyprojectSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+# 'myproject.middlewares.MyprojectDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# 'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+# 'myproject.pipelines.MyprojectPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
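
Relative to the stock generated settings, the notable edits here are ROBOTSTXT_OBEY = False
and the added LOG_LEVEL = 'WARNING'. Settings can also be overridden per spider through the
standard custom_settings attribute; a minimal sketch (hypothetical spider, not part of the
commit):

    import scrapy


    class ChattySpider(scrapy.Spider):
        """Illustrative only: restore full debug logging for this spider alone."""
        name = 'chatty'
        custom_settings = {'LOG_LEVEL': 'DEBUG'}
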
diff --git a/myproject/myproject/spiders/__init__.py b/myproject/myproject/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/myproject/myproject/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/myproject/myproject/spiders/blogitems.py b/myproject/myproject/spiders/blogitems.py
new file mode 100644
index 0000000..805681b
--- /dev/null
+++ b/myproject/myproject/spiders/blogitems.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+import scrapy
+
+from ..items import BlogPostItem
+
+
+class BlogItemsSpider(scrapy.Spider):
+    name = 'blogitems'
+    allowed_domains = ['codyhiar.com']
+    start_urls = ['http://codyhiar.com/']
+
+    def __init__(self, tag=None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.start_urls = ['https://www.codyhiar.com/tags/{}'.format(tag)]
+
+    def parse(self, response):
+        # from scrapy.shell import inspect_response
+        # inspect_response(response, self)
+        year_archives = response.css('.archive')
+        for year_archive in year_archives:
+            year = year_archive.css('h3::text').extract_first()
+            posts = year_archive.css('.post-item')
+            for post in posts:
+                title = post.css('a::text').extract_first().strip()
+                date = post.css('.post-time::text').extract_first()
+                blog_post = BlogPostItem(
+                    title=title,
+                    date=date,
+                    year=year
+                )
+                print(blog_post)
+
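
The spider takes its tag from the command line, e.g. scrapy crawl blogitems -a tag=python
(the tag value is only an example). Note that parse() just prints each BlogPostItem; since
nothing is yielded, item pipelines and feed exports never see the results. Yielding instead
would be a one-line change (sketch, not part of the commit):

    yield blog_post  # instead of print(blog_post), so Scrapy collects the item
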
diff --git a/myproject/myproject/spiders/linkfinder.py b/myproject/myproject/spiders/linkfinder.py
new file mode 100644
index 0000000..04c057b
--- /dev/null
+++ b/myproject/myproject/spiders/linkfinder.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+import scrapy
+from scrapy.spiders import CrawlSpider, Rule
+from scrapy.linkextractors import LinkExtractor
+
+
+class LinkFinderSpider(scrapy.spiders.CrawlSpider):
+    name = 'linkfinder'
+    allowed_domains = ['www.codyhiar.com']
+    start_urls = ['https://codyhiar.com/']
+
+    rules = (Rule(LinkExtractor(allow=()), process_links='print_internal_links'),)
+
+    def print_internal_links(self, links):
+        for link in links:
+            if 'codyhiar.com' in link.url:
+                print(link.url)
+        return links
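
Run with scrapy crawl linkfinder, the CrawlSpider follows every link its rule extracts, and
print_internal_links prints the codyhiar.com URLs before handing the list back to the crawler.
If off-site links should be dropped at extraction time instead, the extractor itself can be
restricted (a sketch, not part of the commit):

    rules = (
        Rule(LinkExtractor(allow_domains=['codyhiar.com']),
             process_links='print_internal_links'),
    )
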
diff --git a/myproject/myproject/spiders/shell.py b/myproject/myproject/spiders/shell.py
new file mode 100644
index 0000000..a342921
--- /dev/null
+++ b/myproject/myproject/spiders/shell.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+"""Invoke the scrapy shell from the spider."""
+import scrapy
+
+
+class ShellSpider(scrapy.Spider):
+    name = 'shell'
+    allowed_domains = ['codyhiar.com']
+    start_urls = ['http://codyhiar.com/']
+
+    def parse(self, response):
+        from scrapy.shell import inspect_response
+        inspect_response(response, self)
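
Running scrapy crawl shell fetches the start URL and then drops into the interactive Scrapy
shell via inspect_response, with the fetched response available for experimenting with
selectors, for example:

    response.css('title::text').extract_first()

The standalone equivalent, without a spider, is scrapy shell 'https://codyhiar.com/'.
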
diff --git a/myproject/scrapy.cfg b/myproject/scrapy.cfg
new file mode 100644
index 0000000..86af274
--- /dev/null
+++ b/myproject/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = myproject.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = myproject
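
The [settings] section points the scrapy command-line tool at myproject.settings; the
[deploy] section only matters when pushing the project to a scrapyd server (its url line is
left commented out here), typically with the scrapyd-deploy tool from scrapyd-client.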