0

我的scrapy中有这个管道,我需要从Scrapy stats中获取信息

class MyPipeline(object):

    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.stats)

    def process_item(self, item, spider):           
        print self.stats.get_stats()['item_scraped_count']
        return item

当我运行代码时,我得到了这个错误

Traceback (most recent call last):
  File "D:\Kerja\HIT\PYTHON~1\<project_name>\<project_name>\lib\site-packages\twisted\internet\defer.py", line 649, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "D:\Kerja\HIT\Python Projects\<project_name>\<project_name>\<project_name>\<project_name>\pipelines.py", line 35, in process_item
    print self.stats.get_stats()['item_scraped_count']
KeyError: 'item_scraped_count'

如果这不是获取统计数据值的正确方法,那我该怎么办?

4

2 回答 2

3

找到了答案!最后!

而不是self.stats.get_stats()['item_scraped_count']使用self.stats.get_value('item_scraped_count')

https://doc.scrapy.org/en/latest/topics/stats.html

于 2016-11-17T22:46:16.380 回答
0

固定版本:使用 Scrapy 1.8

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exporters import CsvItemExporter

import datetime


class SplitFilePipeline(object):

    def __init__(self, stats):
        self.stats = stats
        self.base_filename = "crawls/output_{}.csv"
        self.next_split = self.split_limit = 10000 # assuming you want to split 50000 items/csv
        self.create_exporter()

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.stats)

    def create_exporter(self):
        now = datetime.datetime.now()
        datetime_stamp = now.strftime("%Y%m%d%H%M")
        self.file = open(self.base_filename.format(datetime_stamp),'w+b')
        self.exporter = CsvItemExporter(self.file)
        self.exporter.start_exporting()

    def process_item(self, item, spider):
        if (self.stats.get_value('item_scraped_count') or 0) >= self.next_split:
            self.next_split += self.split_limit
            self.exporter.finish_exporting()
            self.file.close()
            self.create_exporter()
        self.exporter.export_item(item)
        return item
于 2020-03-02T12:21:28.170 回答