2

我将抓取下面链接中的 JavaScript 表格: http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml

import codecs
import lxml.html as lh
from lxml import etree
import requests
from selenium import webdriver
import urllib2
from bs4 import BeautifulSoup

# Render the soccer-results page with Selenium so the JavaScript-generated
# tables exist in the DOM before we hand the HTML to BeautifulSoup.
URL = 'http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml'

profile = webdriver.FirefoxProfile()
# Raise the per-host connection cap so the page's many assets load faster.
profile.set_preference('network.http.max-connections', 30)
profile.update_preferences()

browser = webdriver.Firefox(profile)
browser.get(URL)

# page_source is already a single unicode string; joining it character by
# character (''.join(content)) was a no-op, so pass it directly.
content = browser.page_source
soup = BeautifulSoup(content)

当我得到网页的内容时,我需要知道该特定联赛的足球比赛轮数。

下面的代码只找到了唯一的表,我可以知道如何获得所有 38 个足球比赛的表吗?谢谢你。

# Scrape the round-selector cells: each <td class="lsm2"> is one clickable
# round of soccer matches for this league.
soup.findAll('td', attrs={'class': 'lsm2'})

# Print the match results of the default (currently selected) round only.
# There are 38 rounds in total, with element ids s1 through s38.
print soup.find("div", {"id": "Match_Table"}).prettify()
4

2 回答 2

2
# ============================================================
import codecs
import lxml.html as lh
from lxml import etree
import requests
from selenium import webdriver
import urllib2
from bs4 import BeautifulSoup
from pandas import DataFrame, Series
import html5lib

URL = 'http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml'
profile = webdriver.FirefoxProfile()
profile.set_preference('network.http.max-connections', 30)
profile.update_preferences()
browser = webdriver.Firefox(profile)
browser.get(URL)

content = browser.page_source
soup = BeautifulSoup(''.join(content))
# num = soup.findAll('td', attrs={'class': 'lsm2'})
# num = soup.findAll('table')[2].findAll('td')[37].text
# soup.findAll('table',attrs={'class':'e_run_tb'})

    num1 = soup.findAll('table')[2].findAll('tr')
    for i in range(1,len(num1)+1):
        for j in range(1,len(num1[i-1])+1):
            # click button on website
            clickme = browser.find_element_by_xpath('//*[@id="e_run_tb"]/tbody/tr'+'['+str(i)+']'+'/td'+'['+str(j)+']')
            clickme.click()

            content = browser.page_source
            soup = BeautifulSoup(''.join(content))

            table = soup.find('div', attrs={'class': 'e_matches'})
            rows = table.findAll('tr')
#           for tr in rows:
#             cols = tr.findAll('td')
#             for td in cols:
#                    text = td.find(text=True)
#                    print text,
#                print
            for tr in rows[5:16]: #from row 5 to 16
                cols = tr.findAll('td')
                for td in cols:
                    text = td.find(text=True)
                    print text,
                print
            print
于 2013-11-10T04:48:29.703 回答
0

最简单的做法可能是使用 Selenium 依次单击 lsm2 中第 2 到第 38 轮的链接(第 1 轮在页面加载时已经显示),然后在每次单击后抓取 id 为 Match_Table 的表格——边抓取边累积你的结果。

于 2013-11-02T04:37:14.327 回答