2

这是我的演示,我认为没有错误但是这段代码不能向mysql插入数据!?

设置.py

这是设置代码:

BOT_NAME = 'Scan'
SPIDER_MODULES = ['scan.spiders']
#NEWSPIDER_MODULE = 'scan.spiders'
ITEM_PIPELINES = ['scan.pipelines.MySQLStorePipeline']

这是管道代码,我认为它没有错误:

管道.py

from scrapy import log
from twisted.enterprise import adbapi
from scrapy.http import Request
from scrapy.exceptions import DropItem
from scrapy.contrib.pipeline.images import ImagesPipeline
import datetime
import MySQLdb
import MySQLdb.cursors


class MySQLStorePipeline(object):

    def __init__(self):
        self.db = adbapi.ConnectionPool('MySQLdb',
            db = 'spider',
            host='localhost',
            user = 'root',
            passwd = '123456',
            cursorclass = MySQLdb.cursors.DictCursor,
            charset = 'utf8',
            use_unicode = True
        )

    def process_item(self, item, spider):
        query = self.db.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        if item.get('url'):
            tx.execute(\
                "insert into spider (url) "
                "values (%s)",(item['link'])
            )
            #log.msg("Item stored in db: %s" % item, level=log.DEBUG)
    def handle_error(self, e):
        log.err(e)

这是蜘蛛模块,我认为它没有错误!

蜘蛛.py

# coding=utf-8
from urlparse import urljoin
import simplejson

from scrapy.http import Request
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from scan.items import ScanItem

class ScanSpider(CrawlSpider):
    name = 'Scan'
    allowed_domains = ["a.com"]
    start_urls = [
        "http://www.a.com",
    ]
    rules = (
        Rule(SgmlLinkExtractor(allow=(r'http://(.*?)'),deny_domains=(r'qq.com'))
        ),
        Rule(SgmlLinkExtractor(allow=(r'http://www.a.com')), callback="parse_item"),
        )

    def parse_item(self, response):
        hxs = HtmlXPathSelector(response)
        item = ScanItem()
        items = []
        #item['title'] = hxs.select('//title/text()').extract()
        item['url'] = hxs.select('//a[@href]').re('(\"http://(.*?)\")').extract()
        items.append(item)
        return items
SPIDER = ScanSpider()
4

2 回答 2

1

you seriously needs to read out http://doc.scrapy.org , you are missing very basics .

def parse_item(self, response):
    hxs = HtmlXPathSelector(response)
    item = ScanItem()
    items = []
    #item['title'] = hxs.select('//title/text()').extract()
    item['url'] = hxs.select('//a[@href]').re('(\"http://(.*?)\")').extract()
    items.append(item)
    return items

in your parse_items there is no need to add item in items of list you can simply return item like

    def parse_item(self, response):
       hxs = HtmlXPathSelector(response)
       item = ScanItem()
       #item['title'] = hxs.select('//title/text()').extract()
       item['url'] = hxs.select('//a[@href]').re('(\"http://(.*?)\")').extract()
       return item

keeping in mind item['url'] has a list of urls

in your MySQLStorePipeline

def _conditional_insert(self, tx, item):
    if item.get('url'):
        tx.execute(\
            "insert into spider (url) "
            "values (%s)",(item['link'])
        )

you are trying to insert item['link'] in database while you never populated item['link'] but only item['url'].

于 2012-12-20T07:42:23.053 回答
0

目前,没有进一步澄清。

我认为,由于预计刮擦的返回是一个Item(其子类,甚至只是一个dict- 我承认我没有彻底检查代码以了解它应该/确实如何工作),那么:

item = ScanItem()
items = []
...
return items

看起来有点苏。

你需要做更多的分析才能得到一个有意义/正确的答案。

于 2012-12-19T13:09:17.423 回答