I made a web crawler that follows all links down to the first-level pages and collects every link with its text, plus every image link with its alt text. Here is the whole code:
import urllib
import re
import time
from threading import Thread
import MySQLdb
import mechanize
import readability
from bs4 import BeautifulSoup
from readability.readability import Document
import urlparse

url = ["http://sparkbrowser.com"]

i = 0
while i < len(url):
    counterArray = [0]
    levelLinks = []
    linkText = ["homepage"]

    def scraper(root, steps):
        # level-by-level crawl: collect links up to 'steps' levels deep
        urls = [root]
        visited = [root]
        counter = 0
        while counter < steps:
            step_url = scrapeStep(urls)
            urls = []
            for u in step_url:
                if u not in visited:
                    urls.append(u)
                    visited.append(u)
                    counterArray.append(counter + 1)
            counter += 1
        levelLinks.append(visited)
        return visited

    def scrapeStep(root):
        # open every URL of the current level and return all links found on it
        result_urls = []
        br = mechanize.Browser()
        br.set_handle_robots(False)
        br.set_handle_equiv(False)
        br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
        for url in root:
            try:
                br.open(url)
                for link in br.links():
                    newurl = urlparse.urljoin(link.base_url, link.url)
                    result_urls.append(newurl)
                    #levelLinks.append(newurl)
            except:
                print "error"
        return result_urls

    scraperOut = scraper(url[i], 1)

    for sl, ca in zip(scraperOut, counterArray):
        print "\n\n", sl, " Level - ", ca, "\n"

        #Mechanize
        br = mechanize.Browser()
        page = br.open(sl)
        br.set_handle_robots(False)
        br.set_handle_equiv(False)
        br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

        #BeautifulSoup
        htmlcontent = page.read()
        soup = BeautifulSoup(htmlcontent)

        # all text links on the page
        for linkins in br.links(text_regex=re.compile('^((?!IMG).)*$')):
            newesturl = urlparse.urljoin(linkins.base_url, linkins.url)
            linkTxt = linkins.text
            print newesturl, linkTxt

        # all links that wrap an image, plus the image's alt text
        for linkwimg in soup.find_all('a', attrs={'href': re.compile("^http://")}):
            imgSource = linkwimg.find('img')
            if linkwimg.find('img', alt=True):
                imgLink = linkwimg['href']
                #imageLinks.append(imgLink)
                imgAlt = linkwimg.img['alt']
                #imageAlt.append(imgAlt)
                print imgLink, imgAlt
            elif linkwimg.find('img', alt=False):
                imgLink = linkwimg['href']
                #imageLinks.append(imgLink)
                imgAlt = ['No Alt']
                #imageAlt.append(imgAlt)
                print imgLink, imgAlt

    i += 1
Everything works fine until my crawler reaches one of the facebook links, which it cannot read; instead it gives me the error

httperror_seek_wrapper: HTTP Error 403: request disallowed by robots.txt

for line 68, which is: page = br.open(sl)

I don't understand why, because as you can see I have set mechanize's set_handle_robots and addheaders options.
I don't know why this happens, but I've noticed that I get the error for facebook links, in this case facebook.com/sparkbrowser, and for Google. A stripped-down version of the failing part is shown below.
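For reference, here is a minimal sketch isolating the failing part, keeping the same order of mechanize calls as in the loop over scraperOut above (I'm using facebook.com/sparkbrowser as the URL, since that is one of the links that triggers it):

import mechanize

br = mechanize.Browser()
# open() happens first, then the handler/header options, exactly as in the main loop
page = br.open("http://facebook.com/sparkbrowser")  # fails here with HTTP Error 403: request disallowed by robots.txt
br.set_handle_robots(False)
br.set_handle_equiv(False)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]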
Any help or suggestions are welcome.

Cheers