试图从此 url 获取所有 (5) 个表。
我可以用 type(value) 填充各个页面的下拉框,
但这不会刷新页面。使用 nextPage 按钮单步浏览页面失败,因为该对象不再附加到 DOM(而且我不知道如何解决这个问题)。
尝试填充下拉列表,然后选择它。这将返回此错误:
Traceback (most recent call last):
File "<stdin>", line 69, in <module>
File "/usr/local/lib/python2.6/dist-packages/splinter/driver/webdriver/__init__.py", line 334, in select
self.find_by_xpath('//select[@name="%s"]/option[@value="%s"]' % (self["name"], value))._element.click()
File "/usr/local/lib/python2.6/dist-packages/splinter/element_list.py", line 73, in __getattr__
self.__class__.__name__, name))
AttributeError: 'ElementList' object has no attribute '_element'
我使用了下面的代码。非常感谢任何帮助!
from splinter import Browser
from lxml.html import parse
from StringIO import StringIO
from time import sleep
# Target page: MOL vessel-schedule listing for port NLRTMDE (Rotterdam),
# filtered to 01-Oct-2013 .. 10-Oct-2013 via query-string parameters.
url = r'http://www.molpower.com//VLCWeb/UIAboutMOL/PortScheduleInfo.aspx?pPort=NLRTMDE&pFromDate=01-Oct-2013&pToDate=10-Oct-2013'
def _unpack(row, kind = 'td'):
elts = row.findall('.//%s' %kind)
return [val.text_content() for val in elts[0:7]]
def parse_schdls_data(table):
    """Split a schedule <table> element into (headers, data).

    Headers are the <th> cells of the first row.  Data rows are taken
    from every third of the remaining rows (skipping the last row) --
    the table apparently interleaves non-data rows between records.
    Returns a tuple (headers, data).
    """
    all_rows = table.findall('.//tr')
    headers = _unpack(all_rows[0], kind = 'th')
    data = []
    for idx, row in enumerate(all_rows[1:-1]):
        if idx % 3 == 0:
            data.append(_unpack(row, kind = 'td'))
    return (headers, data)
# Top-level scraping script (Python 2).  NOTE(review): the paste lost all
# indentation; the structure below is reconstructed from the code's logic.
with Browser() as browser:
    browser.visit(url)
    print browser.url
    # Every <option> element on the page; their values enumerate the
    # result pages of the paginated schedule table.
    pages = browser.find_by_tag('option')
    pagevals = [p.value for p in pages]
    maxpagev = max(pagevals)
    inputs = browser.find_by_tag('input')
    # Commented-out earlier attempt: clicking the "next page" button
    # failed because the element detaches from the DOM after postback.
    '''
    for ip, inp in enumerate(inputs):
        if inp.has_class('btnMRBPageNext'):
            #print ip, inp.value, inp.text
            #Need input 35 for the nextPage
            inp.click()
    '''
    # Type the current value back into each page-selector dropdown in an
    # attempt to trigger the page's onchange postback.
    selects = browser.find_by_tag('select')
    for ns, sel in enumerate(selects):
        if sel.has_class('inputDropDown'):
            print ns, sel.value, sel.text
            sel.type(sel.value)
            # NOTE(review): sleep placement inferred -- gives the page
            # time to refresh after typing; confirm intended position.
            sleep(2)
    moldata = list()  # accumulates the parsed data rows from every page
    for page in range(len(pagevals)):
        # Re-parse the full rendered HTML with lxml on each iteration,
        # since the DOM is replaced after every postback.
        content = browser.html
        parsed = parse(StringIO(content))
        doc = parsed.getroot()
        tables = doc.findall('.//table')
        # NOTE(review): table index 91 is hard-coded for this page
        # layout and will break if the site's markup changes.
        schdls = tables[91]
        #Get all rows from that table
        rows = schdls.findall('.//tr')
        hdr, data = parse_schdls_data(schdls)
        #print page, data
        moldata.append(data)
        # Busy-wait until the <select> elements reappear after postback.
        while browser.is_element_not_present_by_tag('select', wait_time = 2):
            pass
        # Re-query elements: previous references are stale after refresh.
        inputs = browser.find_by_tag('input')
        selects = browser.find_by_tag('select')
        #inputs[35].click()
        #selects[0].type(str(page + 1))
        # This is the line that raises AttributeError: 'ElementList'
        # object has no attribute '_element' (see traceback above).
        selects[0].select(selects[0].value)