
I wrote some Python code that fetches the web page corresponding to a given URL and parses all the links on that page into a repository of links. Next, it fetches the content of any one of the URLs in the repository it just built, parses the links from that new content into the repository, and continues this process for the links in the repository until it either stops or a given number of links has been fetched.

Here is the code:

import BeautifulSoup
import urllib2
import itertools
import random


class Crawler(object):
    """docstring for Crawler"""

    def __init__(self):

        self.soup = None                                        # Beautiful Soup object
        self.current_page   = "http://www.python.org/"          # Current page's address
        self.links          = set()                             # Set of every link fetched so far
        self.visited_links  = set()

        self.counter = 0 # Simple counter for debug purpose

    def open(self):

        # Open url
        print self.counter , ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every link
        self.soup = BeautifulSoup.BeautifulSoup(html_code)

        page_links = []
        try:
            page_links = itertools.ifilter(  # Only deal with absolute links
                                            lambda href: 'http://' in href,
                                                ( a.get('href') for a in self.soup.findAll('a') )  )
        except Exception: # Magnificent exception handling
            pass

        # Update links
        self.links = self.links.union( set(page_links) )

        # Choose a random url from non-visited set
        self.current_page = random.sample( self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):

        # Crawl 3 webpages (or stop if all urls have been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()

        for link in self.links:
            print link


if __name__ == '__main__':

    C = Crawler()
    C.run()

This code does not fetch internal links (only hyperlinks in absolute form).

How can I fetch internal links that begin with "/", "#", or "."?


2 Answers


Well, your code kind of already tells you what's going on. In your lambda you are only grabbing absolute links that start with http:// (and you are not grabbing https, FWIW). You should grab all of the links and check whether they start with http or not. If they don't, they are relative links, and since you know what current_page is, you can use it to create an absolute link.

Here is a modification of your code. Excuse my Python, as it's a little rusty, but I ran it and it worked for me in Python 2.7. You'll want to clean it up and add some edge/error detection, but you get the gist:

#!/usr/bin/python

from bs4 import BeautifulSoup
import urllib2
import itertools
import random
import urlparse


class Crawler(object):
    """docstring for Crawler"""

    def __init__(self):
        self.soup = None                                        # Beautiful Soup object
        self.current_page   = "http://www.python.org/"          # Current page's address
        self.links          = set()                             # Set of every link fetched so far
        self.visited_links  = set()

        self.counter = 0 # Simple counter for debug purpose

    def open(self):

        # Open url
        print self.counter , ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every link
        self.soup = BeautifulSoup(html_code)

        page_links = []
        try:
            for link in [h.get('href') for h in self.soup.find_all('a')]:
                print "Found link: '" + link + "'"
                if link.startswith('http'):
                    # Already absolute, keep it as-is
                    page_links.append(link)
                    print "Adding link" + link + "\n"
                elif link.startswith('/'):
                    # Root-relative link: prepend the scheme and host of the current page
                    parts = urlparse.urlparse(self.current_page)
                    page_links.append(parts.scheme + '://' + parts.netloc + link)
                    print "Adding link " + parts.scheme + '://' + parts.netloc + link + "\n"
                else:
                    # Page-relative link ('#...', './...'): append it to the current page
                    page_links.append(self.current_page + link)
                    print "Adding link " + self.current_page + link + "\n"

        except Exception, ex: # Magnificent exception handling
            print ex

        # Update links
        self.links = self.links.union( set(page_links) )

        # Choose a random url from non-visited set
        self.current_page = random.sample( self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):

        # Crawl 3 webpages (or stop if all urls have been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()

        for link in self.links:
            print link

if __name__ == '__main__':
    C = Crawler()
    C.run()
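
As a side note (my own suggestion, not part of the answer above): the urlparse module that is already imported here also provides urljoin, which resolves "/", "#", "." and "../" style references against the current page in one call, so the three-way if/elif/else could potentially be collapsed. A minimal sketch, assuming the same self.soup and self.current_page attributes (the collect_links name is made up for illustration):

import urlparse

def collect_links(self):
    page_links = []
    for a in self.soup.find_all('a'):
        href = a.get('href')
        if not href:                                   # skip <a> tags without an href
            continue
        # urljoin handles absolute URLs, '/path', '#fragment' and './relative' alike
        page_links.append(urlparse.urljoin(self.current_page, href))
    return page_links

For example, urljoin('http://www.python.org/', '#content') gives 'http://www.python.org/#content', and urljoin('http://www.python.org/about/', '/downloads') gives 'http://www.python.org/downloads'.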
Answered 2013-10-03T20:26:32.273

Change the condition in the lambda:

page_links = itertools.ifilter(  # Only deal with absolute links 
                                        lambda href: 'http://' in href or href.startswith('/') or href.startswith('#') or href.startswith('.'),
                                            ( a.get('href') for a in  self.soup.findAll('a') )  )
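
One caveat (my own note, not part of the answer): the filter only lets these relative hrefs through, so they land in self.links still in relative form and urllib2.urlopen will fail when one of them is picked as the next page. They still have to be resolved against current_page first, for example with the standard library's urlparse.urljoin. A small self-contained sketch with made-up example hrefs:

import urlparse

current_page = "http://www.python.org/"
relative_hrefs = ['/downloads', '#content', './about.html']          # illustrative values only
absolute_links = [urlparse.urljoin(current_page, href) for href in relative_hrefs]
# -> ['http://www.python.org/downloads', 'http://www.python.org/#content', 'http://www.python.org/about.html']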
Answered 2013-10-03T20:22:24.330