
I am creating a generic spider (Scrapy spider) for multiple websites. Below is my project directory structure.

myproject <Directory>
--- __init__.py
--- common.py
--- scrapy.cfg
--- myproject <Directory>
    ---__init__.py
    ---items.py
    ---pipelines.py
    ---settings.py
    ---spiders <Directory>
       ---__init__.py
       ---spider.py              (generic spider)
       ---stackoverflow_com.py   (spider per website)
       ---anotherwebsite1_com.py (spider per website)
       ---anotherwebsite2_com.py (spider per website)

common.py

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
'''
    common file
'''

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.crawler import CrawlerProcess
from scrapy.stats import stats
from scrapy.http import Request
from WSS.items import WssItem
import MySQLdb, time
import urllib2, sys

#Database connection
def open_database_connection():
    connection = MySQLdb.connect(user=db_user, passwd=db_password, db=database, host=db_host, port=db_port, charset="utf8", use_unicode=True)
    cursor = connection.cursor()
    return connection, cursor

def close_database_connection(cursor, connection):
    cursor.close()
    connection.close()
    return

class Domain_:
    def __init__(self, spider_name, allowed_domains, start_urls, extract_topics_xpath, extract_viewed_xpath):
        self.spider_name = spider_name
        self.extract_topics_xpath = extract_topics_xpath
        self.extract_viewed_xpath = extract_viewed_xpath
        self.allowed_domains = allowed_domains
        self.start_urls = start_urls

spider.py

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#

from common import *

class DomainSpider(BaseSpider):
    name = "generic_spider"
    def __init__(self, current_domain):
        self.allowed_domains = current_domain.allowed_domains
        self.start_urls = current_domain.start_urls
        self.current_domain = current_domain

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        for topics in list(set(hxs.select(self.current_domain.extract_topics_xpath).extract())):
            yield Request(topics, dont_filter=True, callback=self.extract_all_topics_data)

    def extract_all_topics_data(self, response):
        hxs = HtmlXPathSelector(response)
        item = WssItem()
        print "Processing "+response.url
        connection, cursor = open_database_connection()
        for viewed in hxs.select(self.current_domain.extract_viewed_xpath).extract():
            item['TopicURL'] = response.url
            item['Topic_viewed'] = viewed
            yield item
        close_database_connection(cursor, connection)
        return 

stackoverflow_com.py

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#

from common import *

current_domain = Domain_(
    spider_name = 'stackoverflow_com', 
    allowed_domains = ["stackoverflow.com"], 
    start_urls = ["http://stackoverflow.com/"], 
    extract_topics_xpath = '//div[contains(@class,\"bottomOrder\")]/a/@href',
    extract_viewed_xpath = '//div[contains(@class,\"views\")]/text()'
    )

import WSS.spiders.spider as spider
StackOverflowSpider = spider.DomainSpider(current_domain)

With the scripts above, I don't want to touch spider.py (I assume all the websites have the same structure, so I can use spider.py for all the spiders).

I only want to create a new spider like stackoverflow_com.py for each website, and have it call spider.py for the crawling process.

Can you tell me what is wrong with my code? It shows the error messages below.

output1: if I run "scrapy crawl stackoverflow_com", it shows the error message below

C:\myproject>scrapy crawl stackoverflow_com
2013-08-05 09:41:45+0400 [scrapy] INFO: Scrapy 0.16.4 started (bot: WSS)
2013-08-05 09:41:45+0400 [scrapy] DEBUG: Enabled extensions: LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2013-08-05 09:41:45+0400 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, RedirectMiddleware, CookiesMiddleware, HttpCompressionMiddleware, ChunkedTransferMiddleware, DownloaderStats
2013-08-05 09:41:45+0400 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2013-08-05 09:41:45+0400 [scrapy] DEBUG: Enabled item pipelines: WssPipeline
Traceback (most recent call last):
  File "C:\Python27\lib\runpy.py", line 162, in _run_module_as_main
    "__main__", fname, loader, pkg_name)
  File "C:\Python27\lib\runpy.py", line 72, in _run_code
    exec code in run_globals
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 156, in <module>
    execute()
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 131, in execute
    _run_print_help(parser, _run_command, cmd, args, opts)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 76, in _run_print_help
    func(*a, **kw)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 138, in _run_command
    cmd.run(args, opts)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\commands\crawl.py", line 43, in run
    spider = self.crawler.spiders.create(spname, **opts.spargs)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\spidermanager.py", line 43, in create
    raise KeyError("Spider not found: %s" % spider_name)
KeyError: 'Spider not found: stackoverflow_com'

output2: if I run "scrapy crawl generic_spider", it shows the error message below

C:\myproject>scrapy crawl generic_spider
2013-08-05 12:25:15+0400 [scrapy] INFO: Scrapy 0.16.4 started (bot: WSS)
2013-08-05 12:25:15+0400 [scrapy] DEBUG: Enabled extensions: LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2013-08-05 12:25:16+0400 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, RedirectMiddleware, CookiesMiddleware, HttpCompressionMiddleware, ChunkedTransferMiddleware, DownloaderStats
2013-08-05 12:25:16+0400 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2013-08-05 12:25:16+0400 [scrapy] DEBUG: Enabled item pipelines: WssPipeline
Traceback (most recent call last):
  File "C:\Python27\lib\runpy.py", line 162, in _run_module_as_main
    "__main__", fname, loader, pkg_name)
  File "C:\Python27\lib\runpy.py", line 72, in _run_code
    exec code in run_globals
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 156, in <module>
    execute()
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 131, in execute
    _run_print_help(parser, _run_command, cmd, args, opts)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 76, in _run_print_help
    func(*a, **kw)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\cmdline.py", line 138, in _run_command
    cmd.run(args, opts)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\commands\crawl.py", line 43, in run
    spider = self.crawler.spiders.create(spname, **opts.spargs)
  File "C:\Python27\lib\site-packages\scrapy-0.16.4-py2.7.egg\scrapy\spidermanager.py", line 44, in create
    return spcls(**spider_kwargs)
TypeError: __init__() takes exactly 2 arguments (1 given)

Thanks in advance :)


2 Answers


Try following the template from http://doc.scrapy.org/en/latest/topics/spiders.html#spider-arguments

class DomainSpider(BaseSpider):
    name = "generic_spider"
    def __init__(self, current_domain, *args, **kwargs):
        self.allowed_domains = current_domain.allowed_domains
        self.start_urls = current_domain.start_urls
        super(DomainSpider, self).__init__(*args, **kwargs)
        self.current_domain = current_domain
    ...
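
With that signature in place, Scrapy can instantiate the spider itself, and site-specific values can be passed as spider arguments with -a on the command line. Spider arguments arrive as plain strings, so a minimal sketch (my assumption, not part of the answer above) would resolve that string to a Domain_ configuration, e.g. through a hypothetical DOMAINS dict kept in common.py:

from common import *  # assumes common.py also defines a DOMAINS dict, e.g.
                      # DOMAINS = {"stackoverflow_com": <a Domain_ instance>}

class DomainSpider(BaseSpider):
    name = "generic_spider"

    def __init__(self, current_domain=None, *args, **kwargs):
        super(DomainSpider, self).__init__(*args, **kwargs)
        # "-a current_domain=stackoverflow_com" reaches __init__ as a string,
        # so look up the actual Domain_ configuration object here
        self.current_domain = DOMAINS[current_domain]
        self.allowed_domains = self.current_domain.allowed_domains
        self.start_urls = self.current_domain.start_urls

The spider could then be run as "scrapy crawl generic_spider -a current_domain=stackoverflow_com".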
answered 2013-08-05T07:23:38.957

In my experience, Scrapy only throws a "Spider not found" error when the spider name is not defined. Make sure your spider name, i.e. stackoverflow_com, is assigned in DomainSpider. Try adding:

self.name=current_domain.spider_name
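
Related to this, the "Spider not found" error in output1 also stems from the fact that Scrapy's spider manager discovers spider classes inside the spiders package, not module-level instances, so StackOverflowSpider = spider.DomainSpider(current_domain) never registers anything under the name stackoverflow_com. Below is a minimal sketch (my assumption, not part of the answer above) of a per-site file that defines a small subclass instead, assuming DomainSpider.__init__ accepts (current_domain, *args, **kwargs) as in the first answer:

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# stackoverflow_com.py -- per-site spider defined as a subclass of DomainSpider

from common import Domain_
from WSS.spiders.spider import DomainSpider

current_domain = Domain_(
    spider_name = 'stackoverflow_com',
    allowed_domains = ["stackoverflow.com"],
    start_urls = ["http://stackoverflow.com/"],
    extract_topics_xpath = '//div[contains(@class,"bottomOrder")]/a/@href',
    extract_viewed_xpath = '//div[contains(@class,"views")]/text()'
    )

class StackOverflowSpider(DomainSpider):
    # the name the spider manager looks up when you run "scrapy crawl stackoverflow_com"
    name = current_domain.spider_name

    def __init__(self, *args, **kwargs):
        super(StackOverflowSpider, self).__init__(current_domain, *args, **kwargs)

The generic crawling logic in spider.py stays untouched; each new website only needs its own Domain_ definition and a short subclass like this.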

answered 2017-04-18T09:31:06.683