I have a working solution that uses scrapy_selenium for a site whose content is loaded with JavaScript. As you can see in the code below, a SeleniumRequest is used when yielding the detail-page requests that are handled by parseDetails.
But what do I do when I need the SeleniumRequest already for my start page, and not only for the detail pages as below?
How do I use SeleniumRequest in that case?
import scrapy
from scrapy_selenium import SeleniumRequest


class ZoosSpider(scrapy.Spider):
    name = 'zoos'
    allowed_domains = ['www.tripadvisor.co.uk']
    start_urls = [
        "https://www.tripadvisor.co.uk/Attractions-g186216-Activities-c53-a_allAttractions.true-United_Kingdom.html"
    ]
    existList = []

    def parse(self, response):
        # Collect the links to the detail pages from the overview page
        tmpSEC = response.xpath("//section[@data-automation='AppPresentation_SingleFlexCardSection']")
        for elem in tmpSEC:
            link = response.urljoin(elem.xpath(".//a/@href").get())
            # The detail pages need JavaScript, so they are fetched via Selenium
            yield SeleniumRequest(
                url=link,
                wait_time=10,
                callback=self.parseDetails)

    def parseDetails(self, response):
        # Scrape the fields from the Selenium-rendered detail page
        tmpName = response.xpath("//h1[@data-automation='mainH1']/text()").get()
        tmpLink = response.xpath("//div[@class='Lvkmj']/a/@href").getall()
        tmpURL = tmpTelnr = tmpMail = "N/A"

        yield {
            "Name": tmpName,
            "URL": tmpURL,
        }
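What I have in mind is something like the following minimal sketch, where start_requests is overridden so that the start page itself is also fetched through Selenium. This is only my assumption of how it could look (the wait_time value and reusing self.parse as the callback are placeholders), and I am not sure whether this is the intended way to do it:

import scrapy
from scrapy_selenium import SeleniumRequest


class ZoosSpider(scrapy.Spider):
    name = 'zoos'
    allowed_domains = ['www.tripadvisor.co.uk']
    start_urls = [
        "https://www.tripadvisor.co.uk/Attractions-g186216-Activities-c53-a_allAttractions.true-United_Kingdom.html"
    ]

    def start_requests(self):
        # Override start_requests so the start page goes through Selenium
        # instead of the default scrapy.Request that Scrapy would build
        # from start_urls
        for url in self.start_urls:
            yield SeleniumRequest(
                url=url,
                wait_time=10,
                callback=self.parse)

    def parse(self, response):
        # response should now be the Selenium-rendered start page,
        # same as in parseDetails above
        ...

Is overriding start_requests like this the right approach, or is there a dedicated setting in scrapy_selenium for the start URLs?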