0

嗨:我是 Scraperwiki 和 Python 的新手,并且试图弄清楚当抓取的网页上没有符合我的 cssselect 规范的项目时如何返回“NA”或类似的东西。

在下面的代码中,我正在抓取一组双重嵌套的网页。当我抓取一个没有 cssselect 属性值的子页面时,它只是复制最后一个有值的抓取页面的值。

有什么建议吗?谢谢!托德

import scraperwiki
import urlparse
import lxml.html
import urllib

def scrape_table(root): rows = root.cssselect("h2")

record = {}

for row in rows:
    table_cells = row.cssselect("h2 a")
    for cell in table_cells:
        record['Title'] = table_cells[0].text_content()
        table_cellsurls = table_cells[0].cssselect("a")

        record['CaseURL'] = table_cellsurls[0].attrib.get('href')

        caselinkurl = urllib.urlopen('http://www.italaw.com/'+table_cellsurls[0].attrib.get('href')).read()
        print caselinkurl

        caseroots = lxml.html.fromstring(caselinkurl)

        ids=caseroots.cssselect("div div div div a")
        #turns out that the data i want is third and second instance. BUT THE PROBLEM I HAVE IS THAT IT COPIES THE PREVIOUS ROW IF NULL.
        for i in ids:
            if len(ids)>=2:
                record['Rules']=ids[2].text_content()
                record['Treaty']=ids[3].text_content()
            else:
                return None
                #record['Rules']="NA"
                #record['Treaty']="NA"
                #pass
                #print "None"
# As you can see, i have experimented with different ways of returning nothing.
        pars = caseroots.cssselect("span.'case-doc-details'")


        for par in pars:

            for i in pars:                
                pars1=pars[0].cssselect("a")
                if len(pars1)>=0:
                    record['DetailsURL']=pars1[0].attrib.get('href')
                else: 
                    return None

             #Create a third level of scrape.
                caselinkurl2=urllib.urlopen('http://www.italaw.com/'+pars1[0].attrib.get('href')).read()
                print caselinkurl2
                caseroots2=lxml.html.fromstring(caselinkurl2)
                pars2=caseroots2.cssselect("div.'field-item even' span.'date-display-single'")
                for i in pars2:
                    if len(pars2)>=0:
                        record['Doc Date']=pars2[0].text_content()

                    else:

                        return None

                pars3=caseroots2.cssselect("div.'field-item even' span.'file' a")
                for i in pars3:        
                    if len(pars3)>=0:    
                        record['Doc Type Link']=pars3[0].attrib.get('href') 
                        record['Doc Type']=pars3[0].text_content()    
                    else:
                        return None

                pars4=caseroots2.cssselect("div.'field-name-field-arbitrator-claimant'")
                for i in pars4:
                    if len(pars4)>=0:

                        record['Claimant Nominee']=pars4[0].text_content()
                    else:
                        return None

                pars5=caseroots2.cssselect("div.'field-name-field-arbitrator-respondent'")
                for i in pars5:
                    if len(pars5)>=0:

                        record['Respondent Nominee']=pars5[0].text_content()
                    else:

                        return None

                pars6=caseroots2.cssselect("div.'field-name-field-arbitrator-chair'")
                for i in pars6:
                    if len(pars6)>=0:

                        record['President']=pars6[0].text_content()
                    else:

                        return None

        print record, '------------'

        scraperwiki.sqlite.save(["Title"],record)

def scrape_and_look_for_next_link(url): html = scraperwiki.scrape(url) print html root = lxml.html.fromstring(html) scrape_table(root)

从这里开始:

# Entry point: start from the all-respondents listing page.  The two
# statements were fused onto one line, and the URL literal contained
# stray leading/trailing spaces that would corrupt the request.
url = 'http://www.italaw.com/cases-by-respondent?field_case_respondent_tid=All'
scrape_and_look_for_next_link(url)

4

1 回答 1

1

在这里回答了我自己的问题。

对于可能出现空值的每个查询,请使用以下内容:

        # Guard on the selector result itself.  The original tested
        # len(pars) (always non-empty inside "for par in pars") from
        # inside "for i in pars1" -- but iterating an empty pars1 never
        # runs the loop body, so the "None" branch was unreachable and
        # the record silently kept the previous row's value.
        pars1 = pars[0].cssselect("a") if pars else []
        if pars1:
            record['DetailsURL'] = pars1[0].attrib.get('href')
        else:
            record['DetailsURL'] = "None"
于 2013-07-10T15:24:33.777 回答