
Here I have written code with Python and Beautiful Soup that parses all the links on a page into a repository of links. Next, it fetches the content of any URL from the repository it just created, parses the links from that new content into the repository, and keeps repeating this process for all links in the repository until it is stopped or a given number of links has been fetched.

But this code is quite slow. How can I improve it by using asynchronous programming with gevent in Python?


Code:

import itertools
import random
import urllib2

import BeautifulSoup


class Crawler(object):

    def __init__(self):
        self.soup = None                                        # Beautiful Soup object
        self.current_page   = "http://www.python.org/"          # Current page's address
        self.links          = set()                             # Set of every link fetched so far
        self.visited_links  = set()

        self.counter = 0  # Simple counter for debugging

    def open(self):

        # Open the url
        print self.counter, ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every link
        self.soup = BeautifulSoup.BeautifulSoup(html_code)

        page_links = []
        try:
            page_links = itertools.ifilter(  # Only deal with absolute links
                lambda href: 'http://' in href,
                (a.get('href') for a in self.soup.findAll('a')))
        except Exception as e:  # Magnificent exception handling
            print 'Error: ', e

        # Update links
        self.links = self.links.union(set(page_links))

        # Choose a random url from the non-visited set
        self.current_page = random.sample(self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):

        # Crawl 3 webpages (or stop if all urls have been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()

        for link in self.links:
            print link


if __name__ == '__main__':
    C = Crawler()
    C.run()

Update 1


import gevent.monkey; gevent.monkey.patch_thread()
from bs4 import BeautifulSoup
import urllib2
import itertools
import random
import urlparse
import sys

import gevent.monkey; gevent.monkey.patch_all(thread=False)




class Crawler(object):

    def __init__(self):
        self.soup = None                                        # Beautiful Soup object
        self.current_page   = "http://www.python.org/"          # Current page's address
        self.links          = set()                             # Set of every link fetched so far
        self.visited_links  = set()

        self.counter = 0  # Simple counter for debugging

    def open(self):

        # Open the url
        print self.counter, ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every link
        self.soup = BeautifulSoup(html_code)

        page_links = []
        try:
            for link in [h.get('href') for h in self.soup.find_all('a')]:
                print "Found link: '" + link + "'"
                if link.startswith('http'):
                    print 'entered in if link: ', link
                    page_links.append(link)
                    print "Adding link " + link + "\n"
                elif link.startswith('/'):
                    print 'entered in elif link: ', link
                    parts = urlparse.urlparse(self.current_page)
                    page_links.append(parts.scheme + '://' + parts.netloc + link)
                    print "Adding link " + parts.scheme + '://' + parts.netloc + link + "\n"
                else:
                    print 'entered in else link: ', link
                    page_links.append(self.current_page + link)
                    print "Adding link " + self.current_page + link + "\n"

        except Exception as ex:  # Magnificent exception handling
            print ex

        # Update links
        self.links = self.links.union(set(page_links))

        # Choose a random url from the non-visited set
        self.current_page = random.sample(self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):

        # Crawl 3 webpages (or stop if all urls have been fetched)
        crawling_greenlets = []

        for i in range(3):
            crawling_greenlets.append(gevent.spawn(self.open))

        gevent.joinall(crawling_greenlets)

        # while len(self.visited_links) < 4 or (self.visited_links == self.links):
        #     self.open()

        for link in self.links:
            print link


if __name__ == '__main__':
    C = Crawler()
    C.run()

1 Answer


Import gevent and make sure monkey patching is done, so that the standard library calls become non-blocking and gevent-aware:

import gevent
from gevent import monkey; monkey.patch_all()

(You can decide selectively what gets monkey patched, but let's assume that is not your problem at the moment.)
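For example, if you did want to patch selectively, a minimal sketch (assuming this crawler only needs the socket and ssl modules that urllib2 uses for network I/O to become cooperative) could look like this:

from gevent import monkey

# Patch only the modules urllib2 relies on for network I/O; this is an
# assumption about this crawler's needs, and patch_all() stays the simpler default.
monkey.patch_socket()
monkey.patch_ssl()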

In your run, have your open function called inside a greenlet. run can return the greenlet object, so you can wait for it whenever you need the results, using for example gevent.joinall. Something like this:

def run(self):
    return gevent.spawn(self.open)

c1 = Crawler()
c2 = Crawler()
c3 = Crawler()
crawling_tasks = [c.run() for c in (c1,c2,c3)]
gevent.joinall(crawling_tasks)

print [c.links for c in (c1, c2, c3)]
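Going a step further, if you also want a single Crawler to download several pages at once (rather than one page per Crawler as above), a hypothetical variant could spawn one greenlet per pending url. The sketch below assumes open() has been changed to accept the url to fetch as an argument, so that concurrent greenlets do not all work on the same current_page; crawl_round and batch_size are illustrative names, not part of the answer above:

import gevent
from gevent import monkey; monkey.patch_all()

def crawl_round(crawler, batch_size=3):
    # Pick up to batch_size links that have not been visited yet and
    # fetch each of them in its own greenlet.
    pending = list(crawler.links.difference(crawler.visited_links))[:batch_size]
    jobs = [gevent.spawn(crawler.open, url) for url in pending]
    gevent.joinall(jobs)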