I am building a web crawler. As a first step I need to crawl a website and extract all of its links, but my code does not keep looping over the links it extracts. I tried using append, but that produces a list of lists. I am now trying to use foo and I get an error. Any help would be appreciated. Thanks.
from urllib2 import urlopen
import re

def get_all_urls(url):
    get_content = urlopen(url).read()
    url_list = []
    find_url = re.compile(r'a\s?href="(.*)">')
    url_list_temp = find_url.findall(get_content)
    for i in url_list_temp:
        url_temp = url_list_temp.pop()
        source = 'http://blablabla/'
        url = source + url_temp
        url_list.append(url)
    #print url_list
    return url_list
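What I am aiming for in this step is roughly the following sketch (the name get_all_urls_sketch is just a placeholder, and I am assuming relative links should be resolved against the page's own URL rather than a hard-coded prefix):

from urllib2 import urlopen
from urlparse import urljoin
import re

def get_all_urls_sketch(url):
    content = urlopen(url).read()
    # non-greedy match so each href gives one link, not everything up to the last quote
    hrefs = re.findall(r'href="(.*?)"', content)
    url_list = []
    for href in hrefs:  # iterate directly; no pop(), so no matches are skipped
        url_list.append(urljoin(url, href))
    return url_list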
def web_crawler(seed):
    tocrawl = [seed]
    crawled = []
    i = 0
    while i < len(tocrawl):
        page = tocrawl.pop()
        if page not in crawled:
            #tocrawl.append(get_all_urls(page))
            foo = (get_all_urls(page))
            tocrawl = foo
            crawled.append(page)
        if not tocrawl:
            break
    print crawled
    return crawled
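For the crawl loop, what I think should happen is that tocrawl is extended with the newly found links instead of being replaced or having a whole list appended to it. Something like this sketch (web_crawler_sketch is a placeholder name, and it assumes the link extractor returns a flat list of absolute URLs, like get_all_urls_sketch above):

def web_crawler_sketch(seed):
    tocrawl = [seed]
    crawled = []
    while tocrawl:
        page = tocrawl.pop()
        if page not in crawled:
            # extend adds the individual links one by one; append would nest
            # the whole returned list inside tocrawl (the "list of lists" problem)
            tocrawl.extend(get_all_urls_sketch(page))
            crawled.append(page)
    return crawled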