我编写了这个脚本来调用多个蜘蛛。它适用于单个蜘蛛,但不适用于多个蜘蛛。我是 Scrapy 的新手。
import sys
import os
c=os.getcwd()
os.chdir("myweb")
d=os.getcwd()
os.chdir(c)
sys.path.insert(0, d)
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'myweb.settings'
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy.settings import CrawlerSettings
from scrapy import log, signals
from scrapy.xlib.pydispatch import dispatcher
def stop_reactor():
reactor.stop()
def setup_crawler(spider_name):
crawler = Crawler(CrawlerSettings())
crawler.configure()
crawler.crawl(spider_name)
dispatcher.connect(stop_reactor, signal=signals.spider_closed)
crawler.start()
log.start(loglevel=log.DEBUG)
crawler = Crawler(CrawlerSettings())
crawler.configure()
from aqaq.aqaq.spiders.spider import aqaqspider
spider = aqaqspider(domain='aqaq.com')
setup_crawler(spider)
from aqaq.aqaq.spiders.spider2 import DmozSpider
spider=DmozSpider(domain='shoptiques.com')
setup_crawler(spider)
result = reactor.run()
print result
log.msg("------------>Running stoped")
此外,当一只蜘蛛运行时,另一只蜘蛛开始运行,当一只蜘蛛停止时,整个蜘蛛停止。