如何为 monster.com 创建一个爬虫来爬取所有页面?对于“下一页”链接,monster.com 调用的是 JavaScript 函数,而 Scrapy 无法执行 JavaScript。
这是我的代码,它不适用于分页:
import scrapy
class MonsterComSpider(scrapy.Spider):
    """Crawl data-analyst job listings on monsterindia.com.

    Pagination note: the site's "next page" anchors fire a JavaScript
    function via ``href``, which Scrapy cannot execute. The real target
    URL, however, is duplicated in a custom ``althref`` attribute on the
    same ``<a>`` tags, so we read ``althref`` instead of ``href``.
    """

    name = 'monster.com'
    allowed_domains = ['www.monsterindia.com']
    start_urls = ['http://www.monsterindia.com/data-analyst-jobs.html/']

    def parse(self, response):
        """Yield one detail request per listing, then follow pagination."""
        # Each job-detail link sits in <h2 class="seotitle"><a href="...">.
        urls = response.css('h2.seotitle > a::attr(href)').extract()
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse_details)

        # BUG FIX: .extract() returns a *list* of strings, but
        # response.urljoin() needs a single string — joining a list breaks
        # pagination. .extract_first() returns the first match as a str,
        # or None when there is no next page (which the `if` then skips).
        next_page_url = response.css(
            'ul.pager > li > a::attr(althref)'
        ).extract_first()
        if next_page_url:
            yield scrapy.Request(
                url=response.urljoin(next_page_url),
                callback=self.parse,
            )

    def parse_details(self, response):
        """Yield the job title text scraped from a detail page."""
        yield {
            'name': response.css('h3 > a > span::text').extract(),
        }