
I'm trying to work out the simplest way to record the contents of web pages that are linked from web pages that are linked from an original web page. I want my output to be a table whose rows correspond to the contents of pages three levels deep.

As you can see from the code, I can currently only get the first instance of the items I want on each third-level page. Also, while my current code will return one row corresponding to each h2 item on the base URL, I want multiple rows per h2 item - as many as there are instances of "span.'case-doc-details' a" on the second level.

Some additional information: at each linking stage, I don't know how many pages will be linked. I'm using Python and ScraperWiki and am new to both. I've tried to research this question, but hit a wall in knowing what to ask. Thanks in advance for your help.

import scraperwiki
import urlparse
import lxml.html
import urllib

def scrape_table(root):
    rows = root.cssselect("h2")
    record = {}
    counter=0
    for row in rows:
        table_cells = row.cssselect("h2 a")
        for cell in table_cells:
            record['Count']=counter
            table_cellsurls = table_cells[0].cssselect("a")
            record['CaseURL'] = table_cellsurls[0].attrib.get('href')
            caselinkurl = urllib.urlopen('http://www.italaw.com/'+table_cellsurls[0].attrib.get('href')).read()

            #print caselinkurl
            caseroots = lxml.html.fromstring(caselinkurl)
            title=caseroots.cssselect("title")
            record['Title'] = title[0].text_content()
            ids=caseroots.cssselect("div div div div a")
            for i in ids:
                if len(ids)<=2:
                    record['Rules']="None"
                    record['Treaty']="None"
                else:
                    record['Rules']=ids[2].text_content()
                    record['Treaty']=ids[3].text_content()
            pars = caseroots.cssselect("span.'case-doc-details' a")
            #print "pars length is", len(pars)
            caselinkurl2=urllib.urlopen('http://www.italaw.com/'+pars[0].attrib.get('href')).read()
            caseroots2=lxml.html.fromstring(caselinkurl2)
            #create another table element with rows, marked off with the case that they came from, create all the rows.
            for i in pars:     
                if len(pars)==0:
                    record['DetailsURL']="None"
                else:                    
                    record['DetailsURL']=pars[0].attrib.get('href')
                pars2=caseroots2.cssselect("div.'field-item even' span.'date-display-single'")
                if len(pars2)==0:
                    record['Doc Date']="None"
                else:                        
                    record['Doc Date']=pars2[0].text_content()
                pars3=caseroots2.cssselect("div.'field-name-field-case-doc-file' span.'file' a")
                if len(pars3) ==0:
                    record['Doc Type Link']="None"
                    record['Doc Type']="None"  
                else:
                    record['Doc Type Link']=pars3[0].attrib.get('href')
                    record['Doc Type']=pars3[0].text_content()
                pars4=caseroots2.cssselect("div.'field-name-field-arbitrator-claimant'")
                if len(pars4)==0:
                    record['Claimant Nominee']="None"
                else:
                    record['Claimant Nominee']=pars4[0].text_content()
                pars5=caseroots2.cssselect("div.'field-name-field-arbitrator-respondent'")
                if len(pars5)==0:
                    record['Respondent Nominee']="None"
                else:
                    record['Respondent Nominee']=pars5[0].text_content()
                pars6=caseroots2.cssselect("div.'field-name-field-arbitrator-chair'")
                if len(pars6)==0:
                    record['President']="None"
                else:
                    record['President']=pars6[0].text_content()

            print record, '------------'
            scraperwiki.sqlite.save(['Count'],record)
            counter+=1
def scrape_and_look_for_next_link(url):
    html = scraperwiki.scrape(url)
    print html
    root = lxml.html.fromstring(html)
    scrape_table(root)


#START HERE:
url = 'http://www.italaw.com/cases-by-respondent?field_case_respondent_tid=All'
scrape_and_look_for_next_link(url)

2 Answers


Here's the code I've got so far - it doesn't fetch the document-link data yet (or save anything), but extending it should just be a case of applying the same principles in a further function:

import scraperwiki
import urlparse
import lxml.html
import urllib

def scrape_page(linkurl):
    html = scraperwiki.scrape(linkurl)
    root = lxml.html.fromstring(html)
    title = root.cssselect("h1")
    print "the title:", title[0].text
    record = {}
    record['title'] = title[0].text
    record['url'] = linkurl
    #<div class="field-items"><div class="field-item even"><a
    arbrules = root.cssselect("div.field-items a")
    if arbrules:
        record['arbruleurl'] = arbrules[0].attrib.get("href")
        record['arbrule'] = arbrules[0].text_content()
    else:
        record['arbruleurl'] = "NO URL"
        record['arbrule'] = "NO ARBRULE"
    legalbasis = root.cssselect("div.field-label")
    if legalbasis:
        record['legalbasis'] = legalbasis[0].text_content()
    else:
        record['legalbasis'] = "NO LEGAL BASIS GIVEN"
    extralinks = []
    contents = root.cssselect("div.view-content a")
    if contents:
        for content in contents:
            extralinks.append(content.text_content())
            extralinks.append(content.attrib.get("href"))
        record['extralinks']  = extralinks
    else:
        record['extralinks']  = "NO EXTRA LINKS"
    #record['firstparty'] = title[0].text.split(" v. ")[0]
    #record['secondparty'] = title[0].text.split(" v. ")[1]
    #record['casenumber'] = title[0].text.split(" Case No.")[1]
    print record


def scrape_table(root):
    links = root.cssselect("div.link-wrapper a")
    for link in links:
        print link.text_content()
        linkurl = link.attrib.get("href")
        print linkurl
        scrape_page('http://www.italaw.com'+linkurl)

def scrape_and_look_for_next_link(url):
    html = scraperwiki.scrape(url)
    print html
    root = lxml.html.fromstring(html)
    scrape_table(root)


#START HERE:
url = 'http://www.italaw.com/cases-by-respondent?field_case_respondent_tid=All'
scrape_and_look_for_next_link(url)
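
A sketch of what that further function might look like, following the same fetch-and-parse pattern as scrape_page (the scrape_doc_page name and both selectors are illustrative guesses, not tested against the site):

import scraperwiki
import lxml.html

def scrape_doc_page(docurl):
    # fetch and parse one document page, as scrape_page does for case pages
    html = scraperwiki.scrape(docurl)
    root = lxml.html.fromstring(html)
    record = {}
    record['url'] = docurl
    docdate = root.cssselect("span.date-display-single")   # guessed selector
    if docdate:
        record['docdate'] = docdate[0].text_content()
    else:
        record['docdate'] = "NO DATE"
    docfile = root.cssselect("span.file a")                # guessed selector
    if docfile:
        record['doclink'] = docfile[0].attrib.get("href")
    else:
        record['doclink'] = "NO LINK"
    print record
    scraperwiki.sqlite.save(['url'], record)

It would be called once per document href collected into extralinks, e.g. scrape_doc_page('http://www.italaw.com' + href).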
answered 2013-07-11T13:57:11.333

Here's what I ended up doing for this problem.

Some instructive general points:

  1. Use an if/else branch to distinguish the case where your key attribute has zero length from the case where it has non-zero length.

  2. Before that, create your dictionary.

  3. In both the if and the else parts, give the print, store and index-incrementing instructions; set the index to zero before entering the loop.

  4. In the else part, create a for loop that iterates over every instance i, recording the i-th instance of the key attributes you want to iterate over, and set all other attributes to the zeroth instance (a minimal skeleton of this pattern follows after this list).

  5. Finally, when dealing with an arbitrary number of triply-nested links, it is usually best to scrape all of your data from the lowest level you reach, if possible. That worked in my case because every attribute I wanted to record was repeated at that level; in other cases, I'm not sure what the best approach would be.
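
A minimal skeleton of points 1-4, with hypothetical names (save stands in for scraperwiki.sqlite.save, and items for the list of key-attribute elements):

def save_rows(items, counter, save):
    record = {}                      # point 2: dict created before the branch
    if len(items) == 0:              # point 1: zero-length case
        record['DetailsURL'] = "None"
        record['Count'] = counter
        save(record)                 # point 3: store in both branches...
        counter += 1                 # ...and increment the index
    else:
        for i in range(len(items)):  # point 4: one row per instance i
            record['DetailsURL'] = items[i]
            record['Count'] = counter
            save(record)
            counter += 1
    return counter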

Thanks to Paul for nudging this along.

import scraperwiki
import urlparse
import lxml.html
import urllib

def scrape_table(root):
    rows = root.cssselect("h2")
    counter=0
    for row in rows:
        table_cells = row.cssselect("h2 a")
        for cell in table_cells:
            table_cellsurls = table_cells[0].cssselect("a")
            #record['CaseURL'] = table_cellsurls[0].attrib.get('href')
            caselinkurl = urllib.urlopen('http://www.italaw.com/'+table_cellsurls[0].attrib.get('href')).read()
            #print caselinkurl
            caseroots = lxml.html.fromstring(caselinkurl)
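            # grab the links to the individual document pages; the quoted class
            # form span.'case-doc-details' worked on ScraperWiki's cssselect,
            # though the usual CSS spelling is span.case-doc-details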
            pars = caseroots.cssselect("span.'case-doc-details' a")
            #print "pars length is", len(pars)
            record = {}
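            # the dict is reused for every row; 'Count' is the unique-key column
            # passed to scraperwiki.sqlite.save, so each row is saved separately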
            #create another table element with rows, marked off with the case that they came from, create all the rows.
            if len(pars)==0:
                record['DetailsURL']="None"
                record['Count']=counter
                print record, '------------'
                scraperwiki.sqlite.save(['Count'],record)
                counter+=1
            else:
                for i in range(0,len(pars)):                       
                    record['Count']=counter
                    caselinkurl2=urllib.urlopen('http://www.italaw.com/'+pars[i].attrib.get('href')).read()
                    caseroots2=lxml.html.fromstring(caselinkurl2)
                    record['DetailsURL']=pars[i].attrib.get('href')
                    title=caseroots2.cssselect("h2")
                    record['Title'] = title[1].text_content()
                    rules=caseroots2.cssselect("div.'field-name-field-arbitration-rules'")
                    if len(rules)==0:
                        record['Rules']="None"
                    else:
                        record['Rules']=rules[0].text_content()
                    treaty=caseroots2.cssselect("div.'field-name-field-case-treaties'")
                    if len(treaty)==0:
                        record['Treaty']="None"                                    
                    else:
                        record['Treaty']=treaty[0].text_content()
                    pars2=caseroots2.cssselect("div.'field-name-field-case-document-date'")
                    if len(pars2)==0:
                        record['Doc Date']="None"
                    else:                        
                        record['Doc Date']=pars2[0].text_content()
                    pars3=caseroots2.cssselect("div.'field-name-field-case-doc-file' span.'file' a")
                    if len(pars3) ==0:
                        record['Doc Type Link']="None"
                        record['Doc Type']="None"  
                    else:
                        record['Doc Type Link']=pars3[0].attrib.get('href')
                        record['Doc Type']=pars3[0].text_content()
                    pars4=caseroots2.cssselect("div.'field-name-field-arbitrator-claimant'")
                    if len(pars4)==0:
                        record['Claimant Nominee']="None"
                    else:
                        record['Claimant Nominee']=pars4[0].text_content()
                    pars5=caseroots2.cssselect("div.'field-name-field-arbitrator-respondent'")
                    if len(pars5)==0:
                        record['Respondent Nominee']="None"
                    else:
                        record['Respondent Nominee']=pars5[0].text_content()
                    pars6=caseroots2.cssselect("div.'field-name-field-arbitrator-chair'")
                    if len(pars6)==0:
                        record['President']="None"
                    else:
                        record['President']=pars6[0].text_content()

                    print record, '------------'
                    scraperwiki.sqlite.save(['Count'],record)
                    counter+=1
def scrape_and_look_for_next_link(url):
    html = scraperwiki.scrape(url)
    print html
    root = lxml.html.fromstring(html)
    scrape_table(root)


#START HERE:
url = 'http://www.italaw.com/cases-by-respondent?field_case_respondent_tid=All'
scrape_and_look_for_next_link(url)
answered 2013-07-13T09:40:42.313