1
from string import join
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders.crawl import Rule, CrawlSpider
from scrapy.http.request import Request
from scrapy.selector import HtmlXPathSelector
from Gfire.items import GfireItem

class GuideSpider(CrawlSpider):
    """Crawl the paginated guide listing at example.com/gfire/guides.

    ``rules`` follows every listing page whose URL matches the pagination
    pattern; ``parse_item`` scrapes the guide links from each listing page
    and chains a Request to a guide detail page handled by ``parse_item2``.
    """
    name = "Gfire"
    allowed_domains = ['www.example.com']
    start_urls = [
        "http://www.example.com/gfire/guides"
    ]
    rules = (
        # Trailing comma makes ``allow`` an explicit tuple of patterns
        # (a bare parenthesized string also works, but this states the intent).
        Rule(SgmlLinkExtractor(allow=("gfire/guides.*page=",)),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Collect title/url items from a listing page.

        Returns a Request for the second guide's detail page, carrying the
        whole item list to ``parse_item2`` via ``Request.meta``; if fewer
        than two guides were found, returns the items themselves.
        """
        hxs = HtmlXPathSelector(response)
        items = []
        sites = hxs.select('//div[@class="title"]')
        for site in sites:
            # Fixed: the imported class is GfireItem — GFireItem raised NameError.
            item = GfireItem()
            item['title'] = site.select('./a/text()').extract()
            # extract() returns a list of strings; ''.join collapses it into
            # one URL (replaces the Py2-only string.join helper, whose default
            # separator is a space and would corrupt multi-part results).
            item['guide_url'] = "http://www.example.com" + "".join(
                site.select('./a/@href').extract())
            items.append(item)
        if len(items) < 2:
            # Not enough guides on this page to chase items[1]; just yield items.
            return items
        # Request's first argument must be a URL string, not a GfireItem;
        # pass the scraped items to the callback through meta instead.
        return Request(items[1]['guide_url'], meta={'items': items},
                       callback=self.parse_item2)

    def parse_item2(self, response):
        """Scrape the hero names (text of h3 anchors) from a guide page."""
        hxs = HtmlXPathSelector(response)
        hero = hxs.select("//h3/a/text()").extract()
        return hero

我无法让这个爬虫正常工作。Request 的第一个参数传入了 items[1]，本意是传 item['guide_url']，但报错说该参数必须是 str 或 unicode。我该如何纠正这个错误？另外，如何把 items 列表传递给回调函数——是通过 request.meta 吗？

4

2 回答 2

4

items[1] 实际上是一个 GFireItem 对象，而不是 URL 字符串。

我不确定您为什么要创建这些,因为您只使用一个(站点列表中的第二个站点),而丢弃列表的其余部分。

除此之外，您需要在创建 Request 时用 items[1]['guide_url'] 取出 URL：

        return Request(items[1]['guide_url'], callback=self.parse_item2)
于 2012-09-08T12:28:36.697 回答
4
def parse_item(self, response):
    """Listing-page callback: build GfireItem records and chain a Request.

    The item list travels to ``parse_item2`` through ``Request.meta`` —
    ``meta=`` is a keyword argument of Request; the original
    ``request.meta={'items': items}`` was a SyntaxError.
    """
    hxs = HtmlXPathSelector(response)
    items = []
    sites = hxs.select('//div[@class="title"]')
    for site in sites:
        # Fixed: the imported class is GfireItem (GFireItem was a NameError).
        item = GfireItem()
        item['title'] = site.select('./a/text()').extract()
        # extract() yields a list; ''.join collapses it into one URL string
        # (replaces the Py2-only string.join, whose default separator is a space).
        item['guide_url'] = "http://www.example.com" + "".join(
            site.select('./a/@href').extract())
        items.append(item)
    # Pass the URL string (not the item object) and carry the list in meta.
    return Request(items[1]['guide_url'], meta={'items': items},
                   callback=self.parse_item2)

def parse_item2(self, response):
    """Return the hero names found in the guide page's h3 anchors."""
    forwarded_items = response.meta["items"]  # item list sent by parse_item
    selector = HtmlXPathSelector(response)
    return selector.select("//h3/a/text()").extract()
于 2012-09-18T11:53:36.490 回答