1

我在scrapy,python中使用站点地图蜘蛛。站点地图似乎有不寻常的格式,网址前有“//”:

<url>
    <loc>//www.example.com/10/20-baby-names</loc>
</url>
<url>
    <loc>//www.example.com/elizabeth/christmas</loc>
 </url>

myspider.py

from scrapy.contrib.spiders import SitemapSpider
from myspider.items import *

class MySpider(SitemapSpider):
    """Sitemap-driven spider that records each page's URL and title."""
    name = "myspider"
    sitemap_urls = ["http://www.example.com/robots.txt"]

    def parse(self, response):
        # Collect the page URL and its <title> text into a PostItem.
        post = PostItem()
        post['url'] = response.url
        post['title'] = response.xpath('//title/text()').extract()
        return post

我收到此错误:

raise ValueError('Missing scheme in request url: %s' % self._url)
    exceptions.ValueError: Missing scheme in request url: //www.example.com/10/20-baby-names

如何使用站点地图蜘蛛手动解析 url?

4

3 回答 3

2

如果我没看错,您可以(作为快速解决方案)覆盖 SitemapSpider 中 _parse_sitemap 的默认实现。这并不理想,因为您将不得不复制大量代码,但应该可行。您必须添加一种方法,为缺少方案(scheme)的 URL 补上方案。

"""if the URL starts with // take the current website scheme and make an absolute
URL with the same scheme"""
def _fix_url_bug(url, current_url):
    if url.startswith('//'):
           ':'.join((urlparse.urlsplit(current_url).scheme, url))
       else:
           yield url

def _parse_sitemap(self, response):
    """Yield follow-up Requests for every sitemap entry, repairing
    scheme-relative ("//host/path") locations before building Requests.

    Copied from SitemapSpider._parse_sitemap with _fix_url_bug() applied
    to each <loc> value.
    """
    if response.url.endswith('/robots.txt'):
        # robots.txt may advertise one or more sitemap URLs.
        for url in sitemap_urls_from_robots(response.body):  # bug fix: colon was missing
            yield Request(url, callback=self._parse_sitemap)
    else:
        body = self._get_sitemap_body(response)
        if body is None:
            log.msg(format="Ignoring invalid sitemap: %(response)s",
                    level=log.WARNING, spider=self, response=response)
            return

        s = Sitemap(body)
        if s.type == 'sitemapindex':
            for loc in iterloc(s):
                # Repair the URL before the follow-pattern test, so the
                # patterns match a full, schemed URL.
                loc = _fix_url_bug(loc, response.url)
                if any(x.search(loc) for x in self._follow):
                    yield Request(loc, callback=self._parse_sitemap)
        elif s.type == 'urlset':
            for loc in iterloc(s):
                loc = _fix_url_bug(loc, response.url)  # same repair here
                for r, c in self._cbs:
                    if r.search(loc):
                        yield Request(loc, callback=c)
                        break

这只是一个普遍的想法,未经测试。所以它要么完全不起作用,要么可能存在语法错误。请通过评论回复,以便我改进我的答案。

您尝试解析的站点地图本身似乎是不规范的。从 URL 的 RFC 看,省略方案(scheme-relative URL)是完全合法的,但站点地图协议(sitemaps.org)明确要求每个 <loc> 中的 URL 必须以方案开头。

于 2014-12-04T09:46:55.653 回答
1

我使用@alecxe 的技巧来解析蜘蛛中的网址。我让它工作,但不确定这是否是最好的方法。

from urlparse import urlparse
import re 
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.utils.response import body_or_str
from example.items import *

class ExampleSpider(BaseSpider):
    """Spider that extracts <loc> entries from a sitemap by hand and
    follows each URL, adding a scheme when the entry is scheme-relative."""
    name = "example"
    start_urls = ["http://www.example.com/sitemap.xml"]

    def parse(self, response):
        """Find every <loc>...</loc> value in the sitemap and request it."""
        nodename = 'loc'
        text = body_or_str(response)
        r = re.compile(r"(<%s[\s>])(.*?)(</%s>)" % (nodename, nodename), re.DOTALL)
        for match in r.finditer(text):
            url = match.group(2)
            if url.startswith('//'):
                # Scheme-relative entry: default to http.
                url = 'http:' + url
            # Bug fix: yield OUTSIDE the if-block, otherwise URLs that
            # already carry a scheme are silently dropped.
            yield Request(url, callback=self.parse_page)

    def parse_page(self, response):
        """Build a PostItem from the fetched page."""
        item = PostItem()
        item['url'] = response.url
        item['title'] = response.xpath('//title/text()').extract()
        return item
于 2014-12-05T08:07:23.543 回答
1

我认为最好和最干净的解决方案是添加一个下载器中间件,它会在没有蜘蛛注意到的情况下更改恶意 URL。

import re
import urlparse
from scrapy.http import XmlResponse
from scrapy.utils.gz import gunzip, is_gzipped
from scrapy.contrib.spiders import SitemapSpider

# downloader middleware
class SitemapWithoutSchemeMiddleware(object):
    """Downloader middleware that rewrites scheme-relative <loc> URLs in
    sitemap responses, so SitemapSpider never sees a schemeless URL."""

    def process_response(self, request, response, spider):
        """Patch sitemap bodies on their way to a SitemapSpider."""
        # Only touch responses that are headed for a SitemapSpider.
        if not isinstance(spider, SitemapSpider):
            return response
        sitemap_body = self._get_sitemap_body(response)
        if not sitemap_body:
            return response
        # Borrow the scheme of the URL the sitemap itself was fetched from.
        scheme = urlparse.urlsplit(response.url).scheme
        fixed = re.sub(r'<loc>\/\/(.+)<\/loc>',
                       r'<loc>%s://\1</loc>' % scheme, sitemap_body)
        return response.replace(body=fixed)

    # this is from scrapy's Sitemap class, but sitemap is
    # only for internal use and it's api can change without
    # notice
    def _get_sitemap_body(self, response):
        """Return the sitemap body contained in the given response, or None if the
        response is not a sitemap.
        """
        if isinstance(response, XmlResponse):
            return response.body
        if is_gzipped(response):
            return gunzip(response.body)
        if response.url.endswith('.xml'):
            return response.body
        if response.url.endswith('.xml.gz'):
            return gunzip(response.body)
        return None
于 2014-12-08T17:08:15.073 回答