2

我创建了一个名为 aqaq 的蜘蛛,它位于文件名 image.py 中。image.py 的内容如下:

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
a=[]
from aqaq.items import aqaq
import os
class aqaqspider(BaseSpider):
    """Spider for aqaq.com: collects product links from the view-all listing
    page, records them to a file named "url", and scrapes title/cost/desc/color
    from each product page into `aqaq` items."""
    name = "aqaq"
    allowed_domains = ["aqaq.com"]
    start_urls = [
                        "http://www.aqaq.com/list/female/view-all?limit=all"
    ]

    def parse(self, response):
        """Extract product URLs from the listing page, write them to the
        "url" file, and schedule a request per product (callback: parsed)."""
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//ul[@class="list"]/li')
        # Collect hrefs in a local list instead of the module-level `a`,
        # which accumulated results across crawls/instances.
        urls = []
        for site in sites:
            # extract() returns a list; keep only entries that matched.
            hrefs = site.select('a[@class="product-name"]/@href').extract()
            if hrefs:
                urls.append(hrefs[0])
        # `with` guarantees the file is closed even if scheduling fails;
        # the original relied on reaching an explicit f.close().
        with open("url", "w+") as f:
            for url in urls:
                f.write(url + os.linesep)
                yield Request(url, callback=self.parsed)

    def parsed(self, response):
        """Parse a single product page and return the list of scraped items."""
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="form"]')
        items = []
        for site in sites:
            item = aqaq()
            item['title'] = site.select('h1/text()').extract()
            item['cost'] = site.select('div[@class="price-container"]/span[@class="regular-price"]/span[@class="price"]/text()').extract()
            item['desc'] = site.select('div[@class="row-block"]/p/text()').extract()
            item['color'] = site.select('div[@id="colours"]/ul/li/a/img/@src').extract()
            items.append(item)
        # BUG FIX: `return items` was indented inside the loop, so only the
        # first matched block was ever returned. Return after the loop.
        return items

我正在尝试使用我的 python 脚本运行这个蜘蛛,如下所示:

from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy.settings import Settings
from scrapy import log, signals
from spiders.image import aqaqspider
from scrapy.xlib.pydispatch import dispatcher
def stop_reactor():
    reactor.stop()
dispatcher.connect(stop_reactor, signal=signals.spider_closed)
spider = aqaqspider(domain='aqaq.com')
crawler = Crawler(Settings())
crawler.configure()
crawler.crawl(spider)
crawler.start()a
log.start(loglevel=log.DEBUG)
log.msg("------------>Running reactor")
result = reactor.run()
print result
log.msg("------------>Running stoped")

在运行上述脚本时,我收到以下错误:

2013-09-27 19:21:06+0530 [aqaq] ERROR: Error downloading <GET http://www.aqaq.com/list/female/view-all?limit=all>: 'Settings' object has no attribute 'overrides'

我是初学者,需要帮助???

4

1 回答 1

4

您必须使用CrawlerSettings而不是Settings.

更改此行:

    from scrapy.settings import Settings

改为:

    from scrapy.settings import CrawlerSettings

而这一行:

    crawler = Crawler(Settings())

改为:

    crawler = Crawler(CrawlerSettings())
于 2013-09-28T17:09:31.503 回答