import os
import time
from selenium import webdriver
from linkedin_scraper import actions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options


chrome_options = Options()    
chrome_options.add_argument("--headless")

driver = webdriver.Chrome("driver/chromedriver", options=chrome_options)    
email = os.getenv("LINKEDIN_USER")    
password = os.getenv("LINKEDIN_PASSWORD")

actions.login(driver, email, password)  # if email and password aren't given, it will prompt in the terminal
driver.get('https://www.linkedin.com/company/biorasi-llc/about/')

_ = WebDriverWait(driver, 3).until(EC.presence_of_all_elements_located((By.TAG_NAME, 'section')))

time.sleep(3)    
grid = driver.find_elements_by_tag_name("section")[3]
about_us = grid.find_elements_by_tag_name("p")[0].text.strip()

print(about_us)

This is the code I use to scrape a company's about_us data. It works, but sometimes I get an error like the one below:

TimeoutException                          Traceback (most recent call last)
<ipython-input-...> in <module>
     17 email = os.getenv("LINKEDIN_USER")
     18 password = os.getenv("LINKEDIN_PASSWORD")
---> 19 actions.login(driver, email, password) # if email and password aren't given, it will prompt in the terminal
     20 driver.get('https://www.linkedin.com/company/biorasi-llc/about/')
     21 _ = WebDriverWait(driver, 3).until(EC.presence_of_all_elements_located((By.TAG_NAME, 'section')))

~\Anaconda3\lib\site-packages\linkedin_scraper\actions.py in login(driver, email, password)
     28     password_elem.submit()
     29
---> 30     element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "profile-nav-item")))

~\Anaconda3\lib\site-packages\selenium\webdriver\support\wait.py in until(self, method, message)
     78             if time.time() > end_time:
     79                 break
---> 80         raise TimeoutException(message, screen, stacktrace)
     81
     82     def until_not(self, method, message=''):

TimeoutException: Message:

Can someone please help me figure out how to fix this?


1 Answer


It is probably because your timeout is too short (3 seconds), so it hits the timeout threshold before the page has fully loaded. Try raising it to 5-10 seconds on line 21:

TIMEOUT = 10
_ = WebDriverWait(driver, TIMEOUT).until(EC.presence_of_all_elements_located((By.TAG_NAME, 'section')))

Here are a few tips to improve your code:

  • You are already using a fluent wait (WebDriverWait), so try to minimize time.sleep where possible. WebDriverWait stops waiting and returns the element as soon as it is found, which saves time.
  • Finding elements by tag name and then picking one by index (the 4th section tag in this case) is a bad idea: it will break as soon as the site adds another section. Try a better XPath instead. Here is my code; I haven't tested it, but I think it should work.
import os
import time
from selenium import webdriver
from linkedin_scraper import actions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options


chrome_options = Options()    
chrome_options.add_argument("--headless")

driver = webdriver.Chrome("driver/chromedriver", options=chrome_options)    
email = os.getenv("LINKEDIN_USER")    
password = os.getenv("LINKEDIN_PASSWORD")

actions.login(driver, email, password)  # if email and password aren't given, it will prompt in the terminal
driver.get('https://www.linkedin.com/company/biorasi-llc/about/')

# waits for the About paragraph directly, so time.sleep is no longer needed
paragraph_elem = WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.XPATH, '//section//h4/..//p')))
about_us = paragraph_elem.text.strip()

print(about_us)
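
If the wait still times out now and then even with the longer timeout, you can also catch the TimeoutException and retry the page load once before giving up. This is only a rough sketch built on top of the code above (the get_about_us helper, the retry count, and reusing the same XPath are my own choices, not part of linkedin_scraper):

from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# same locator as above: a <p> inside the <section> that contains an <h4>
ABOUT_XPATH = '//section//h4/..//p'

def get_about_us(driver, url, timeout=15, retries=1):
    """Return the about-us paragraph text, retrying once if the wait times out."""
    for attempt in range(retries + 1):
        driver.get(url)
        try:
            paragraph_elem = WebDriverWait(driver, timeout).until(
                EC.presence_of_element_located((By.XPATH, ABOUT_XPATH))
            )
            return paragraph_elem.text.strip()
        except TimeoutException:
            if attempt == retries:
                raise  # still timing out after the last retry, give up

# assumes `driver` is already created and logged in, as in the code above
about_us = get_about_us(driver, 'https://www.linkedin.com/company/biorasi-llc/about/')
print(about_us)

That way a single slow page load doesn't kill the whole run, while a real problem (blocked login, changed markup) still surfaces as a TimeoutException.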
answered 2020-12-08T08:45:54.037