• 欢迎访问金笔头博客,这是一个菜鸟(伪)程序员的自留地,欢迎访问我的github:点击进入

Scrapy学习笔记(8)-使用signals来监控spider的状态

爬虫技术 eason 10729次浏览 3个评论 扫描二维码

场景介绍

有时候我们需要在spider启动或者结束的时候执行一些特定的操作,比如说记录日志之类的,在scrapy中我们可以使用signals来实现。

主要实现代码如下:

# -*- coding: utf-8 -*-
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from items import IpProxyPoolItem
from model.spider_running_log import SpiderCrawlLog
from model import loadSession
from datetime import datetime
class ProxySpiderSpider(CrawlSpider):
    """Rule-driven proxy-list spider whose lifecycle is tracked via Scrapy signals.

    On spider_opened a SpiderCrawlLog row is inserted (unless one is already
    open for this rule); on spider_closed that row is stamped with an end time.
    parse_item bumps the page counter on the open log row and yields one
    IpProxyPoolItem per proxy row matched by the rule's xpaths.
    """
    name = 'MagicSpider'

    def __init__(self, rule):
        # Bind the spider_opened signal to our open-log handler.
        dispatcher.connect(self.spider_opened, signals.spider_opened)
        # Bind the spider_closed signal to our close-log handler.
        dispatcher.connect(self.spider_closed, signals.spider_closed)
        # `rule` is a DB-backed config object: comma-separated URL lists plus
        # one xpath per extracted field — TODO confirm against the model module.
        self.rule = rule
        self.name = rule.name
        self.allowed_domains = rule.allowed_domains.split(',')
        self.start_urls = rule.start_urls.split(',')
        rule_list = []

        # Follow "next page" links when the rule defines an xpath for them.
        if rule.next_page:
            rule_list.append(Rule(LinkExtractor(restrict_xpaths=rule.next_page), follow=True))

        # Item pages: follow matching links and hand them to parse_item.
        rule_list.append(Rule(LinkExtractor(
            allow=rule.allow_url.split(','),
            unique=True),
            follow=True,
            callback='parse_item'))

        self.rules = tuple(rule_list)
        super(ProxySpiderSpider, self).__init__()

    def spider_closed(self, spider):
        """Mark the currently open crawl-log row for this rule as closed."""
        print("spider is closed!")
        session = loadSession()
        log = session.query(SpiderCrawlLog).filter(SpiderCrawlLog.spiderID == self.rule.id,
                                                   SpiderCrawlLog.endTime == None
                                                   ).first()
        # Guard: .first() returns None when no open row exists (e.g. the
        # opened handler never ran); the original would crash here.
        if log is not None:
            log.endTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            log.status = "closed"
            session.commit()

    def spider_opened(self, spider):
        """Insert a new running-log row unless one is already open for this rule."""
        print("spider is running!")
        session = loadSession()
        open_logs = session.query(SpiderCrawlLog).filter(
            SpiderCrawlLog.spiderID == self.rule.id,
            SpiderCrawlLog.endTime == None)

        # Only create a new row when this spider has no unfinished log.
        if open_logs.count() == 0:
            session.add(SpiderCrawlLog(
                spiderID=self.rule.id,
                spiderName=self.rule.name,
                status="Running...",
                startTime=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                endTime=None,
                pages=0,
                items=0))
            session.commit()

    def _extract_field(self, selector, xpath):
        """Return the stripped text at *xpath* inside *selector*.

        Returns "" when the rule defines no xpath for the field or the
        node is absent from the page.
        """
        if not xpath:
            return ""
        value = selector.xpath(xpath).extract_first()
        return value.strip() if value is not None else ""

    def parse_item(self, response):
        """Count this page in the crawl log, then yield one item per proxy row."""
        session = loadSession()
        log = session.query(SpiderCrawlLog).filter(SpiderCrawlLog.spiderID == self.rule.id,
                                                   SpiderCrawlLog.endTime == None).first()
        # Guard against a missing open-log row instead of crashing.
        if log is not None:
            log.pages = int(log.pages) + 1
            session.commit()

        # Without a row-loop xpath there is nothing to extract on this page.
        if not self.rule.loop_xpath:
            return

        for proxy in response.xpath(self.rule.loop_xpath):
            ip = self._extract_field(proxy, self.rule.ip_xpath)
            port = self._extract_field(proxy, self.rule.port_xpath)
            location1 = self._extract_field(proxy, self.rule.location1_xpath)
            location2 = self._extract_field(proxy, self.rule.location2_xpath)

            # BUG FIX: build a fresh item per row. The original mutated and
            # re-yielded one shared IpProxyPoolItem instance, so deferred
            # consumers saw every yielded item aliasing the last row's data.
            item = IpProxyPoolItem()
            item['ip_port'] = ":".join([ip, port]) if port else ip
            item['type'] = self._extract_field(proxy, self.rule.type_xpath)
            item['level'] = self._extract_field(proxy, self.rule.level_xpath)
            item['location'] = " ".join([location1, location2]) if location2 else location1
            item['speed'] = self._extract_field(proxy, self.rule.speed_xpath)
            item['lifetime'] = self._extract_field(proxy, self.rule.lifetime_xpath)
            item['lastcheck'] = self._extract_field(proxy, self.rule.lastcheck_xpath)
            item['rule_id'] = self.rule.id
            item['source'] = response.url
            yield item

应用截图:

使用signals机制来插入日志并实时更新spider的运行和数据爬取状态
Scrapy学习笔记(8)-使用signals来监控spider的状态

金笔头博客, 版权所有丨如未注明 , 均为原创, 转载请注明Scrapy学习笔记(8)-使用signals来监控spider的状态
喜欢 (5)
发表我的评论
取消评论

表情 贴图 加粗 删除线 居中 斜体 签到

Hi,您需要填写昵称和邮箱!

  • 昵称 (必填)
  • 邮箱 (必填)
  • 网址
(3)个小伙伴在吐槽
  1. 下面的“应用截图”,应用是如何做的?
    聆风2018-07-22 15:10 Reply 未知操作系统 | Chrome 67.0.3396.99
  2. 你的model哪来的啊
    crawler2018-04-13 15:21 Reply 未知操作系统 | Chrome 63.0.3239.132