
I've been at this for 12 hours now, and I'm hoping someone can lend me a hand.

Here is my code. All I want is to get the anchor text and URL of every link on a page as the spider crawls.

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from urlparse import urljoin

#from scrapy.item import Item
from tutorial.items import DmozItem

class HopitaloneSpider(CrawlSpider):
    name = 'dmoz'
    allowed_domains = ['domain.co.uk']
    start_urls = [
        'http://www.domain.co.uk'
    ]

    rules = (
        #Rule(SgmlLinkExtractor(allow='>example\.org', )),
        Rule(SgmlLinkExtractor(allow=('\w+$', )), callback='parse_item', follow=True),
    )

    user_agent = 'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))'

    def parse_item(self, response):
        #self.log('Hi, this is an item page! %s' % response.url)

        hxs = HtmlXPathSelector(response)
        #print response.url
        sites = hxs.select('//html')
        #item = DmozItem()
        items = []

        for site in sites:
            item = DmozItem()
            item['title'] = site.select('a/text()').extract()
            item['link'] = site.select('a/@href').extract()

            items.append(item)

        return items

What am I doing wrong... my eyes really hurt now.


2 Answers


response.body should be what you want:

def parse_item(self, response):
    #self.log('Hi, this is an item page! %s' % response.url)

    body = response.body
    item = ....
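
For completeness, a minimal sketch of where that leads, assuming the DmozItem fields from the question; HtmlXPathSelector parses the same HTML that response.body holds:

def parse_item(self, response):
    # response.body is the raw HTML of the fetched page
    body = response.body

    # rather than parsing body by hand, you can build a selector from the response
    hxs = HtmlXPathSelector(response)
    item = DmozItem()
    item['title'] = hxs.select('//a/text()').extract()
    item['link'] = hxs.select('//a/@href').extract()
    return item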
answered 2013-03-29T07:01:32.140

To get all the links on a single page:

def parse_item(self, response):
    hxs = HtmlXPathSelector(response)
    items = []
    links = hxs.select("//a")

    for link in links:
        item = DmozItem()
        item['title'] = link.select('text()').extract()
        item['link'] = link.select('@href').extract()

        items.append(item)

    return items
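
As a side note, the scrapy.contrib paths used in these snippets were deprecated in later Scrapy releases. A rough equivalent of the whole spider on current Scrapy might look like the sketch below (the domain, item fields, and allow pattern are taken from the question):

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

from tutorial.items import DmozItem

class HopitaloneSpider(CrawlSpider):
    name = 'dmoz'
    allowed_domains = ['domain.co.uk']
    start_urls = ['http://www.domain.co.uk']

    rules = (
        Rule(LinkExtractor(allow=(r'\w+$',)), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # yield one item per <a> element: anchor text plus href
        for link in response.xpath('//a'):
            item = DmozItem()
            item['title'] = link.xpath('text()').extract()
            item['link'] = link.xpath('@href').extract()
            yield item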
answered 2012-11-19T14:29:11.460