
I want to follow all the links on the web pages where the pdf files are listed and store those pdf files on my system.

from scrapy.spider import BaseSpider
from bs4 import BeautifulSoup


class spider_a(BaseSpider):
    name = "Colleges"
    allowed_domains = ["www.abc.org"]  # domain names only, no scheme
    start_urls = [
        "http://www.abc.org/appwebsite.html",
        "http://www.abc.org/misappengineering.htm",
    ]

    def parse(self, response):
        soup = BeautifulSoup(response.body)
        for link in soup.find_all('a'):
            download_link = link.get('href')
            # skip <a> tags that have no href attribute
            if download_link and '.pdf' in download_link:
                pdf_url = "http://www.abc.org/" + download_link
                print pdf_url

With the above code I can find the links to the pdf files on the pages I expect.

from scrapy.spider import BaseSpider

class FileSpider(BaseSpider):
    name = "fspider"
    allowed_domains = ["www.aicte-india.org"]
    start_urls = [
        "http://www.abc.org/downloads/approved_institut_websites/an.pdf#toolbar=0&zoom=85"
    ]

    def parse(self, response):
        # name the file after the last path segment of the URL
        filename = response.url.split("/")[-1]
        with open(filename, 'wb') as f:
            f.write(response.body)

With this code I can save the body of the page listed in start_urls.

Is there a way to combine these two spiders so that the pdfs get saved just by running my crawler?


1 Answer


Why do you need two spiders?

from urlparse import urljoin

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector

class spider_a(BaseSpider):
    ...
    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        # extract() converts the matched @href nodes into plain strings
        for href in hxs.select('//a/@href[contains(., ".pdf")]').extract():
            # resolve relative links against the page URL and fetch the pdf
            yield Request(urljoin(response.url, href),
                          callback=self.save_file)

    def save_file(self, response):
        # name the file after the last path segment of the URL
        filename = response.url.split("/")[-1]
        with open(filename, 'wb') as f:
            f.write(response.body)
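
Run the merged spider with scrapy crawl Colleges; the pdfs are written to the directory the crawl is started from.

If you are on a recent Scrapy release, the same result can also be achieved with the built-in FilesPipeline, which downloads and stores every URL listed in an item's file_urls field. The snippet below is only a minimal sketch of that route, not part of the original answer: it assumes a recent Scrapy version (scrapy.Spider, response.xpath(...).getall(), scrapy.pipelines.files.FilesPipeline) and a placeholder FILES_STORE directory "./pdfs".

import scrapy


class PdfSpider(scrapy.Spider):
    # sketch for a recent Scrapy version; the name, domains and start URLs
    # are placeholders taken from the question
    name = "colleges_pdfs"
    allowed_domains = ["www.abc.org"]
    start_urls = [
        "http://www.abc.org/appwebsite.html",
        "http://www.abc.org/misappengineering.htm",
    ]

    custom_settings = {
        # FilesPipeline downloads every URL listed under "file_urls"
        "ITEM_PIPELINES": {"scrapy.pipelines.files.FilesPipeline": 1},
        # directory where the downloaded pdfs end up (placeholder path)
        "FILES_STORE": "./pdfs",
    }

    def parse(self, response):
        for href in response.xpath('//a/@href[contains(., ".pdf")]').getall():
            # response.urljoin resolves relative links against the page URL
            yield {"file_urls": [response.urljoin(href)]}

With that setup, scrapy crawl colleges_pdfs stores each pdf under a full/ subdirectory of FILES_STORE, named after a hash of its URL.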