I am trying to implement a simple web crawler and have written some starter code: there are two modules, fetcher.py and crawler.py. Here are the files:
fetcher.py:
import urllib2

def fetcher(s):
    "fetch a web page from a url"
    try:
        req = urllib2.Request(s)
        urlResponse = urllib2.urlopen(req).read()
    except urllib2.URLError as e:
        print e.reason
        return
    # derive a filename from the host part of the url and
    # dump the page into it, then rewind so the caller can read it
    p, q = s.split("//")
    d = q.split("/")
    fdes = open(d[0], "w+")
    fdes.write(str(urlResponse))
    fdes.seek(0)
    return fdes

if __name__ == "__main__":
    defaultSeed = "http://www.python.org"
    print fetcher(defaultSeed)
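From the urllib2 docs, urlopen accepts an optional timeout argument (Python 2.6+), so a fetch could fail fast instead of hanging on a slow host. A rough sketch of that variant (fetch_with_timeout is just an illustrative name, not part of my code):

import urllib2
import socket

def fetch_with_timeout(url, timeout=10):
    # give up after `timeout` seconds instead of waiting on the
    # platform's default TCP timeout
    try:
        return urllib2.urlopen(url, timeout=timeout).read()
    except (urllib2.URLError, socket.timeout) as e:
        print "failed to fetch %s: %s" % (url, e)
        return None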
crawler.py:
from bs4 import BeautifulSoup
import re

from fetcher import fetcher

usedLinks = open("Used", "a+")
newLinks = open("New", "w+")
newLinks.seek(0)

def parse(fd, var=0):
    soup = BeautifulSoup(fd)
    # append every absolute link found on the page to New
    for li in soup.find_all("a", href=re.compile("http")):
        newLinks.seek(0, 2)
        newLinks.write(str(li.get("href")).strip("/"))
        newLinks.write("\n")
    fd.close()
    # read back the next unvisited link from where the last read stopped
    newLinks.seek(var)
    link = newLinks.readline().strip("\n")
    return str(link)

def crawler(seed, n):
    if n == 0:
        usedLinks.close()
        newLinks.close()
        return
    else:
        usedLinks.write(seed)
        usedLinks.write("\n")
        fdes = fetcher(seed)
        newSeed = parse(fdes, newLinks.tell())
        crawler(newSeed, n - 1)

if __name__ == "__main__":
    crawler("http://www.python.org/", 7)
The problem is that when I run crawler.py, it works fine for the first 4-5 links, then hangs, and after about a minute gives me the following error:
[Errno 110] Connection timed out
Traceback (most recent call last):
File "crawler.py", line 37, in <module>
crawler("http://www.python.org/",7)
File "crawler.py", line 34, in crawler
crawler(newSeed,n-1)
File "crawler.py", line 34, in crawler
crawler(newSeed,n-1)
File "crawler.py", line 34, in crawler
crawler(newSeed,n-1)
File "crawler.py", line 34, in crawler
crawler(newSeed,n-1)
File "crawler.py", line 34, in crawler
crawler(newSeed,n-1)
File "crawler.py", line 33, in crawler
newSeed = parse(fdes,newLinks.tell())
File "crawler.py", line 11, in parse
soup = BeautifulSoup(fd)
File "/usr/lib/python2.7/dist-packages/bs4/__init__.py", line 169, in __init__
self.builder.prepare_markup(markup, from_encoding))
File "/usr/lib/python2.7/dist-packages/bs4/builder/_lxml.py", line 68, in prepare_markup
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
File "/usr/lib/python2.7/dist-packages/bs4/dammit.py", line 191, in __init__
self._detectEncoding(markup, is_html)
File "/usr/lib/python2.7/dist-packages/bs4/dammit.py", line 362, in _detectEncoding
xml_encoding_match = xml_encoding_re.match(xml_data)
TypeError: expected string or buffer
Can anyone help me with this? I am very new to Python and I cannot figure out why it says the connection timed out after a while.
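One more data point in case it helps: when fetcher hits a URLError it prints the reason and returns None, and as far as I can tell passing None to BeautifulSoup reproduces the same TypeError as in my traceback:

from bs4 import BeautifulSoup

fd = None                 # what fetcher returns after a URLError
soup = BeautifulSoup(fd)  # TypeError: expected string or buffer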