0

我正在尝试在 python 中使用 selenium 从网络上抓取一张表。但是网站速度很慢,而且大部分时间都有很多网络问题。因此,即使网站需要时间加载,我也希望代码不断重试。我总共需要抓取 941 个条目。我尝试了在网上找到的名为 retry 的模块,但它似乎不起作用。下面给出代码示例。有没有其他方法可以让代码在网站加载完成之前不断重试?

"""Scrape one table from the (slow, flaky) NREGA dynamic-work-details page.

Fixes over the original:
* The original decorated empty ``make_trouble`` stubs with ``@retry()``;
  their bodies were only a docstring, so the decorated functions did
  nothing and the module-level statements below them ran exactly once
  with no retrying at all.  A real retry helper replaces them.
* ``BeautifulSoup`` was used but never imported.
* Stray Markdown back-ticks fused onto the last line were removed.
"""
import io
import time

import pandas as pd
from bs4 import BeautifulSoup  # was missing in the original
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from webdriver_manager.firefox import GeckoDriverManager

driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())

# Target page: KERALA, fin_year 2020-2021.
driver.get("http://mnregaweb4.nic.in/netnrega/dynamic_work_details.aspx?page=S&lflag=eng&state_name=KERALA&state_code=16&fin_year=2020-2021&source=national&Digest=s5wXOIOkT98cNVkcwF6NQA")


def select_when_ready(element_id, value, attempts=20, delay=5):
    """Retry until the dropdown ``element_id`` exists and ``value`` is selectable.

    The site loads slowly, so element lookups and option selection can fail
    transiently; each failure waits ``delay`` seconds before the next try.
    Raises TimeoutError after ``attempts`` consecutive failures.
    """
    for _ in range(attempts):
        try:
            Select(driver.find_element_by_id(element_id)).select_by_value(value)
            return
        except Exception:  # element missing / option not loaded yet
            time.sleep(delay)
    raise TimeoutError(f"could not select {value!r} in #{element_id}")


driver.implicitly_wait(5)

# District -> Block -> GP cascade; each selection triggers a postback,
# hence the sleeps between steps (values copied from the original).
select_when_ready('ContentPlaceHolder1_ddl_dist', "1613")
time.sleep(6)
select_when_ready('ContentPlaceHolder1_ddl_blk', "1613001")
time.sleep(4)
select_when_ready('ContentPlaceHolder1_ddl_pan', "1613001001")
time.sleep(4)

driver.find_element_by_id("ContentPlaceHolder1_Button1").click()
time.sleep(8)

# Parse every table on the result page; index 4 is the one of interest.
soup = BeautifulSoup(driver.page_source, 'lxml')
tables = soup.find_all('table')
dfs = pd.read_html(str(tables))

print(dfs[4])

# Round-trip through CSV to flatten the two-row header into a MultiIndex.
df1 = pd.read_csv(io.StringIO(dfs[4].to_csv(index=False)), skiprows=1, header=[0, 1])
df1.to_csv("test with pandas V3.csv", index=False)
driver.close()
4

2 回答 2

1
# Retry the whole scrape until it succeeds once, then stop.
# Fixes over the original answer:
# * `break` after success — without it the loop re-ran forever, and since
#   `driver.close()` had already run, every subsequent pass raised and the
#   script printed "Error" endlessly.
# * `except Exception` instead of a bare `except:`, so Ctrl-C
#   (KeyboardInterrupt) can still stop the script; the actual error is
#   printed instead of a bare "Error".
while True:
    try:
        driver.implicitly_wait(5)

        # District dropdown.
        Select(driver.find_element_by_id('ContentPlaceHolder1_ddl_dist')).select_by_value("1613")
        time.sleep(6)

        # Block dropdown.
        Select(driver.find_element_by_id('ContentPlaceHolder1_ddl_blk')).select_by_value("1613001")
        time.sleep(4)

        # GP dropdown.
        Select(driver.find_element_by_id('ContentPlaceHolder1_ddl_pan')).select_by_value("1613001001")
        time.sleep(4)

        driver.find_element_by_id("ContentPlaceHolder1_Button1").click()
        time.sleep(8)

        # Parse all tables; table index 4 holds the wanted data.
        soup = BeautifulSoup(driver.page_source, 'lxml')
        tables = soup.find_all('table')
        dfs = pd.read_html(str(tables))

        print(dfs[4])

        df1 = pd.read_csv(io.StringIO(dfs[4].to_csv(index=False)), skiprows=1, header=[0, 1])
        df1.to_csv("test with pandas V3.csv", index=False)
        driver.close()
        break  # success — leave the retry loop
    except Exception as exc:
        print(f"Error: {exc}")
于 2021-03-01T04:53:13.067 回答
1

这不是我的代码,而是应管理员的要求,对 Sangun Devkota 的代码所做的修改。

这样,循环每失败 5 次才打印一次错误,而不是每次失败都打印。

# Retry the scrape, printing "Error" only once per 5 consecutive failures.
# Fixes over the original answer:
# * The error counter was named `x`, but inside the `try` block `x` was
#   immediately reassigned to a WebElement (`x = driver.find_element...`),
#   so once any element lookup succeeded, `x % 5` in the handler raised
#   TypeError.  The counter is now `errors` and the elements are unnamed.
# * `break` after success — otherwise the loop re-ran after
#   `driver.close()` and failed forever.
# * `except Exception` instead of a bare `except:`, so Ctrl-C still works.
errors = 0
while True:
    try:
        driver.implicitly_wait(5)

        # District dropdown.
        Select(driver.find_element_by_id('ContentPlaceHolder1_ddl_dist')).select_by_value("1613")
        time.sleep(6)

        # Block dropdown.
        Select(driver.find_element_by_id('ContentPlaceHolder1_ddl_blk')).select_by_value("1613001")
        time.sleep(4)

        # GP dropdown.
        Select(driver.find_element_by_id('ContentPlaceHolder1_ddl_pan')).select_by_value("1613001001")
        time.sleep(4)

        driver.find_element_by_id("ContentPlaceHolder1_Button1").click()
        time.sleep(8)

        # Parse all tables; table index 4 holds the wanted data.
        soup = BeautifulSoup(driver.page_source, 'lxml')
        tables = soup.find_all('table')
        dfs = pd.read_html(str(tables))

        print(dfs[4])

        df1 = pd.read_csv(io.StringIO(dfs[4].to_csv(index=False)), skiprows=1, header=[0, 1])
        df1.to_csv("test with pandas V3.csv", index=False)
        driver.close()
        break  # success — leave the retry loop
    except Exception:
        # Throttle the noise: report only every 5th consecutive failure.
        if errors % 5 == 0:
            print("Error")
        errors += 1

如果您希望它只打印一次,您可以将其更改为:

x = True

... 其他代码 ...

except:
   if x:
      print('Error')
      x = False
于 2021-03-01T10:44:26.803 回答