What do I need to do to scrape an article when ads keep getting in the way? Specifically, the kind of overlay that pops up in the middle of the screen asking you to log in / register, which you have to close manually before you can read the page.

Because of that, my crawler can't extract any content. Any suggestions on how to code "close the ad before crawling" with pyquery?

Edit: I am now trying to get rid of the popup with Selenium (a rough sketch of what I'm attempting is below, after my current code). Any advice would be appreciated.
import mechanize
import time
import urllib2
import pdb
import lxml.html
import re
from pyquery import PyQuery as pq
def open_url(url):
    print 'open url:', url
    try:
        # fetch the raw HTML with mechanize, pretending to be Firefox
        br = mechanize.Browser()
        br.set_handle_equiv(True)
        br.set_handle_redirect(True)
        br.set_handle_referer(True)
        br.set_handle_robots(False)
        br.addheaders = [('user-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.3) Gecko/20100423 Ubuntu/10.04 (lucid) Firefox/3.6.3')]
        response = br.open(url)
        html = response.get_data()
        return html
    except:
        print u"!!!! url can not be opened by mechanize either!!! \n"

def extract_text_pyquery(html):
    p = pq(html)
    # article body paragraphs
    article_whole = p.find(".entry-content")
    p_tag = article_whole('p')
    print len(p_tag)
    print p_tag
    for i in range(0, len(p_tag)):
        text = p_tag.eq(i).text()
        print text
    # author line
    entire = p.find(".grid_12")
    author = entire.find('p')
    print len(author)
    print "By:", author.text()
    # main photo: keep only image links containing 'smedia'
    images = p.find('#main_photo')
    link = images('img')
    print len(link)
    for i in range(len(link)):
        url = pq(link[i])
        result = url.attr('src').find('smedia')
        if result > 0:
            print url.attr('src')

if __name__ == '__main__':
    #print '----------------------------------------------------------------'
    url_list = ['http://www.newsobserver.com/2014/10/17/4240490/obama-weighs-ebola-czar-texas.html?sp=/99/100/&ihp=1',
                ]
    html = open_url(url_list[0])
    # dissect_article(html)
    extract_text_pyquery(html)
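
Here is the Selenium idea I am experimenting with. It is only a rough sketch: the popup close-button selector ('.modal .close') is a placeholder, since I have not worked out the actual markup of the overlay, and the hard-coded sleep is just to give the popup time to appear.

import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException

def open_url_selenium(url):
    # load the page in a real browser so the JavaScript overlay actually renders
    driver = webdriver.Firefox()
    driver.get(url)
    time.sleep(5)  # give the login/register overlay time to appear
    try:
        # placeholder selector -- the real close button may be named differently
        close_button = driver.find_element_by_css_selector('.modal .close')
        close_button.click()
    except NoSuchElementException:
        pass  # no popup this time, carry on anyway
    html = driver.page_source
    driver.quit()
    return html

The returned page_source could then be fed into extract_text_pyquery() in place of the mechanize result, if that is the right direction.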