I have common behaviour shared between several spiders that should run when the spider_idle signal is received, and I would like to move this behaviour into an extension. My extension already listens for the spider_opened and spider_closed signals successfully. However, the spider_idle signal is not received.
Here is my extension (edited for brevity):
import logging

import MySQLdb
import MySQLdb.cursors
from scrapy import signals
from scrapy.http import Request

logger = logging.getLogger(__name__)


class MyExtension(object):

    def __init__(self, settings, stats):
        self.settings = settings
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        # instantiate the extension object
        ext = cls(crawler.settings, crawler.stats)

        # keep a reference to the crawler so spider_idle can schedule requests
        ext.crawler = crawler

        # connect the extension object to signals
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(ext.spider_idle, signal=signals.spider_idle)

        # return the extension object
        return ext

    def spider_opened(self, spider):
        logger.info("start logging spider %s", spider.name)

    def spider_closed(self, spider):
        logger.info("end logging spider %s", spider.name)

    def spider_idle(self, spider):
        logger.info("idle logging spider %s", spider.name)

        # attempt to crawl orphaned products
        db = MySQLdb.connect(host=self.settings['AWS_RDS_HOST'],
                             port=self.settings['AWS_RDS_PORT'],
                             user=self.settings['AWS_RDS_USER'],
                             passwd=self.settings['AWS_RDS_PASSWD'],
                             db=self.settings['AWS_RDS_DB'],
                             cursorclass=MySQLdb.cursors.DictCursor,
                             use_unicode=True,
                             charset="utf8")
        c = db.cursor()

        # select this website's products that have no scrape record for today
        c.execute("""SELECT p.url
                     FROM products p
                     LEFT JOIN product_data pd
                       ON p.id = pd.product_id AND pd.scrape_date = CURDATE()
                     WHERE p.website_id = %s AND pd.id IS NULL""",
                  (spider.website_id,))

        while True:
            product = c.fetchone()
            if product is None:
                break

            # record orphaned product
            self.stats.inc_value('orphaned_count')
            yield self.crawler.engine.crawl(
                Request(url=product['url'], callback=spider.parse_item), spider)

        db.close()
Why is the signal not being received?
Update

As requested, here's some more info. This is my settings.py:
BOT_NAME = 'myproject'

SPIDER_MODULES = ['myproject.spiders']
NEWSPIDER_MODULE = 'myproject.spiders'

DOWNLOADER_MIDDLEWARES = {
    'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
    'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
}

AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 2
AUTOTHROTTLE_MAX_DELAY = 30
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0

EXTENSIONS = {
    'myproject.extensions.MyExtension': 500,
}
I get the same issue whether the rotating_proxies middleware is enabled or not.
This is an example spider I'm testing with:
import scrapy
from furl import furl
from scrapy.http.request import Request
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

from myproject.items import MyProjectItem


class ScrapeThisSpider(CrawlSpider):
    name = "scrapethis"
    website_id = 5

    custom_settings = {
        'IMAGES_STORE': 's3://myproject-dev/images/scrapethis/',
        'LOG_FILE': 'scrapethis.log',
        'LOG_LEVEL': 'DEBUG',
    }

    allowed_domains = ['scrapethis.co.uk']
    start_urls = ['https://www.scrapethis.co.uk']

    def start_requests(self):
        for url in self.start_urls:
            yield Request(url, dont_filter=True)

    # called as a plain function by process_value, hence no self parameter;
    # appends ar=3 to every extracted link before it is requested
    def customise_url(link):
        f = furl(link)
        f.args['ar'] = '3'
        return f.url

    rules = (
        Rule(LinkExtractor(allow=(), deny=('articleId=',), process_value=customise_url)),
        Rule(LinkExtractor(allow=('articleId=',), process_value=customise_url), callback='parse_item'),
    )

    def parse_item(self, response):
        price = response.xpath("//article[@class='productbox']//strong[@class='pricefield__price']//text()").extract()

        item = MyProjectItem()
        f = furl(response.url)
        item['id'] = f.args['articleId']
        item['spider'] = self.name
        item['price'] = price
        return item
Update 2

I think I have discovered what is causing the spider_idle signal to fail: in my method I'm connecting to an Amazon RDS database, querying it and processing the results. If I comment that code out, my signal handler runs (I get the log entry); if the query code remains, it does not run (or at least, I get no log entry). This is strange, because the first thing I do in the method is log the signal.
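While digging into this, I noticed that my spider_idle method contains a yield, which makes it a generator function. From what I understand, calling a generator function does not execute any of its body; it only creates a generator object, and the body runs when that object is iterated. A minimal sketch in plain Python (nothing Scrapy-specific) of what I mean:

def plain_handler(spider):
    print("plain handler body runs")

def generator_handler(spider):
    print("generator handler body runs")  # not printed by the call below
    yield "something"

plain_handler(None)            # prints immediately
gen = generator_handler(None)  # prints nothing; gen is a generator object
next(gen)                      # only now does the body execute and print

If Scrapy's signal dispatcher simply calls each connected handler, that would explain why even the logger.info call at the top of my method never seems to run.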
Update 3

I've discovered that if I remove the yield keyword from my query-result loop, the handler runs, but only one request is made. I need each of the URLs returned by my query to be added to the crawl. (Apologies if I'm asking anything stupid; I'm still rather new to Python and Scrapy.)