
I have a spider that exports data to different CSV files, named after the item class definitions used in the spider. However, I also want to keep the fields in a specific order as they are processed and exported to their respective CSV files.

For example, this is my items.py:

import scrapy

class first_class_def_Item(scrapy.Item):
    f1 = scrapy.Field() # f1 is an arbitrary id used by both item classes
    f2 = scrapy.Field()
    f3 = scrapy.Field()

class second_class_def_Item(scrapy.Item):
    f1 = scrapy.Field()
    f4 = scrapy.Field()
    f5 = scrapy.Field()
    f6 = scrapy.Field()

This is my pipelines.py:

from scrapy.exporters import CsvItemExporter
from scrapy import signals
from pydispatch import dispatcher


def item_type(item):
    # The CSV file names come from the item class names used in the spider.
    # For this example, I just want "first_class_def.csv", i.e. without the
    # "_Item" suffix of "first_class_def_Item" as defined in items.py.
    return type(item).__name__.replace('_Item','')

class SomeSitePipeline(object):
    # For simplicity, I'm using the same class names here as defined in
    # items.py and used in the main scrapy spider.
    SaveTypes = ['first_class_def','second_class_def']

    def __init__(self):
        dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
        dispatcher.connect(self.spider_closed, signal=signals.spider_closed)

    def spider_opened(self, spider):
        self.files = dict([ (name, open("/somefolder/"+name+'.csv','wb')) for name in self.SaveTypes ])
        self.exporters = dict([ (name,CsvItemExporter(self.files[name])) for name in self.SaveTypes ])
        [e.start_exporting() for e in self.exporters.values()]

    def spider_closed(self, spider):
        [e.finish_exporting() for e in self.exporters.values()]
        [f.close() for f in self.files.values()]

    def process_item(self, item, spider):
        typesItem = item_type(item)
        if typesItem in set(self.SaveTypes):
            self.exporters[typesItem].export_item(item)
        return item

And this is my spider.py:

import os
import scrapy
from itertools import zip_longest
from somesite.items import first_class_def_Item, second_class_def_Item
from csv import DictReader

path = os.path.join(os.path.expanduser('~'), 'user', 'somefolder', 'IDs.csv')

class SomeSiteSpider(scrapy.Spider):
    name = 'somesite'
    allowed_domains = ['somesite.com']
    start_urls = ['https://somesite.com/login.aspx']

    def parse(self, response):

        return scrapy.FormRequest.from_response(response,
                            formdata={'txtLogin$txtInput': 'User',
                                      'txtPassword$txtInput': 'pass',
                                      'btnLogin.x': '53',
                                      'btnLogin.y': '33'},
                            callback=self.Tables)

    def Tables(self, response):

        with open(path) as rows:

            for row in DictReader(rows):

                id = row["id"]

                # 'some_form_control' is an obfuscated placeholder value
                yield scrapy.Request("https://somesite.com/page1.aspx",
                    meta={'id': id,
                          'some_form_control': some_form_control},
                    dont_filter=True,
                    callback=self.first_class_def)

                yield scrapy.Request("https://somesite.com/page2.aspx",
                    meta={'id': id,
                          'some_form_control': some_form_control},
                    dont_filter=True,
                    callback=self.second_class_def)

    def first_class_def(self, response):

        return scrapy.FormRequest.from_response(response,
                    formdata={'id': response.meta['id'],
                              'form_control': response.meta['some_form_control'],
                              'SearchControl$btnCreateReport': 'Create Report'},
                    meta={'id': response.meta['id']},
                    callback=self.scrap_page_1)

    def scrap_page_1(self, response):

        field_1 = response.xpath('//*[@class="formatText"][1]/text()').extract()
        field_2 = response.xpath('//*[@class="formatCurrency"][1]/text()').extract()

        for a, b in zip(field_1, field_2):
            # create a fresh item per row rather than re-yielding one mutated instance
            items = first_class_def_Item()
            items['f1'] = response.meta['id']
            items['f2'] = a
            items['f3'] = b

            yield items

    def second_class_def(self, response):

        return scrapy.FormRequest.from_response(response,
                    formdata={'id': response.meta['id'],
                              'form_control': response.meta['some_form_control'],
                              'form_control_two': 'some_form_control_two',
                              'SearchControl$btnCreateReport': 'Create Report'},
                    meta={'id': response.meta['id']},
                    callback=self.scrap_page_2)

    def scrap_page_2(self, response):

        field_1 = response.xpath('//*[@class="formatText"][1]/text()').extract()
        field_2 = response.xpath('//*[@class="formatCurrency"][1]/text()').extract()
        field_3 = response.xpath('//*[@class="formatText"][3]/text()').extract()

        for a, b, c in zip(field_1, field_2, field_3):
            # create a fresh item per row rather than re-yielding one mutated instance
            items = second_class_def_Item()
            items['f1'] = response.meta['id']
            items['f4'] = a
            items['f5'] = b
            items['f6'] = c

            yield items

As the spider processes and exports the data, I'm looking for a way to have the fields in the generated CSV files, "first_class_def.csv" and "second_class_def.csv", exported in the same order as they are defined in items.py:

f1,f2,f3

f1,f4,f5,f6

However, whenever I run the spider, the fields in the CSV files are exported in random order:

f2,f1,f3 and f5,f1,f4,f6

The workaround is posted below!


2 Answers


Unfortunately, due to the way scrapy's Item is implemented, the information about the order in which the fields were defined is not preserved.
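
You can see this for yourself: Item.fields is an ordinary dict assembled by scrapy's metaclass, and nothing in it records the order in which the fields were declared. A minimal illustration, using the first item class from the question:

import scrapy

class first_class_def_Item(scrapy.Item):
    f1 = scrapy.Field()
    f2 = scrapy.Field()
    f3 = scrapy.Field()

# Item.fields is a plain dict, so this is not guaranteed to print
# ['f1', 'f2', 'f3']; you may get, say, ['f2', 'f1', 'f3'] instead.
print(list(first_class_def_Item.fields.keys()))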

If the order is important, the best you can do is define the order you want as a separate class variable and use that in your pipeline. Passing it as the fields_to_export argument to CsvItemExporter is probably the simplest way.

Here's the basic idea for you to try:

# items.py
class Item1(scrapy.Item):
    fields_to_export = ['f1', 'f2']
    f1 = scrapy.Field()
    f2 = scrapy.Field()

# pipelines.py
from project.items import Item1


class SomeSitePipeline(object):
    save_types = {'item1': Item1}

    def spider_opened(self, spider):
        # (...)
        self.exporters = dict(
            (name, CsvItemExporter(self.files[name], fields_to_export=item_type.fields_to_export))
            for name, item_type in self.save_types.items()
        )
        # (...)

Also, I just noticed you're using list comprehensions for their side effects, which is a bad idea; you should just use plain loops.
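
For example, the [e.start_exporting() for e in self.exporters.values()] line in spider_opened builds a list nobody uses; a plain loop says the same thing directly (a sketch against the pipeline code in the question):

# iterate for the side effect instead of building a throwaway list
for exporter in self.exporters.values():
    exporter.start_exporting()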

Answered 2018-03-01T21:31:55.290

This is the solution to my specific problem: exporting fields organized by item class definition, as defined in the items.py of the scrapy spider project.

So, after working through the issue and implementing @stranac's suggestion to get rid of the list comprehensions, I came up with the following solution, which exports all fields, in order, into their respective csv files:

from scrapy.exporters import CsvItemExporter
from scrapy import signals
from pydispatch import dispatcher


def item_type(item):
    # just want "first_class_def.csv" not "first_class_def_Item.csv"
    return type(item).__name__.replace('_Item','')

class SomeSitePipeline(object):
    fileNamesCsv = ['first_class_def','second_class_def']

    def __init__(self):
        self.files = {}
        self.exporters = {}
        dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
        dispatcher.connect(self.spider_closed, signal=signals.spider_closed)

    def spider_opened(self, spider):
        self.files = dict([ (name, open("/somefolder/"+name+'.csv','wb')) for name in self.fileNamesCsv ])
        for name in self.fileNamesCsv:
            self.exporters[name] = CsvItemExporter(self.files[name])

            if name == 'first_class_def':
                self.exporters[name].fields_to_export = ['f1','f2','f3']
                self.exporters[name].start_exporting()

            if name == 'second_class_def':
                self.exporters[name].fields_to_export = ['f1','f4','f5','f6']
                self.exporters[name].start_exporting()

    def spider_closed(self, spider):
        for e in self.exporters.values():
            e.finish_exporting()
        for f in self.files.values():
            f.close()

    def process_item(self, item, spider):
        typesItem = item_type(item)
        if typesItem in set(self.fileNamesCsv):
            self.exporters[typesItem].export_item(item)
        return item

Now everything works as I originally intended.
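
For reference, a quick way to verify the header order after a crawl (a hypothetical check, assuming the output folder used in the pipeline above):

import csv

# the first row of each exported file should now match fields_to_export
for name in ['first_class_def', 'second_class_def']:
    with open('/somefolder/' + name + '.csv') as f:
        print(name, next(csv.reader(f)))
# expected: ['f1', 'f2', 'f3'] and ['f1', 'f4', 'f5', 'f6']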

Answered 2018-04-05T17:50:54.287