I've nearly finished a web scraper that grabs a table from a page. Right now it only outputs the first row of the table. Can anyone help pin down why it doesn't return all the rows? Please ignore the while loop; it will eventually iterate over a range of pages.
import urllib
from bs4 import BeautifulSoup

#file_name = "/user/joe/uspc-cpc.txt"
#file = open(file_name,"w")

i=125
while i==125:
    url = "http://www.uspto.gov/web/patents/classification/cpc/html/us" + str(i) + "tocpc.html"
    print url + '\n'
    i += 1
    data = urllib.urlopen(url).read()
    print data
    #get the table data from dump
    #append to csv file
    soup = BeautifulSoup(data)
    table = soup.find("table", width='80%')
    for tr in table.findAll('tr')[2:]:
        col = tr.findAll('td')
        uspc = col[0].get_text().encode('ascii','ignore')
        cpc1 = col[1].get_text().encode('ascii','ignore')
        cpc2 = col[2].get_text().encode('ascii','ignore')
        cpc3 = col[3].get_text().encode('ascii','ignore')
        print uspc + ',' + cpc1 + ',' + cpc2 + ',' + cpc3 + '\n'
#file.write(record)
#file.close()
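To narrow this down, here is a small diagnostic I can run first (same Python 2 / urllib setup as above; us125tocpc.html is just the first URL the loop builds). It counts every table matching width='80%' and the rows inside each, which should show whether find() is landing on a table that genuinely contains only one data row:

import urllib
from bs4 import BeautifulSoup

url = "http://www.uspto.gov/web/patents/classification/cpc/html/us125tocpc.html"
soup = BeautifulSoup(urllib.urlopen(url).read())

# Count the <table width="80%"> elements on the page and the <tr> rows in
# each one. If there are many matching tables with only a row or two apiece,
# then find() above is grabbing just the first of them.
tables = soup.findAll("table", width='80%')
print 'matching tables:', len(tables)
for n, t in enumerate(tables):
    print 'table', n, 'has', len(t.findAll('tr')), 'rows'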
The code I'm currently running:
import urllib
from bs4 import BeautifulSoup

#file_name = "/users/ripple/uspc-cpc.txt"
#file = open(file_name,"w")

i=125
while i==125:
    url = "http://www.uspto.gov/web/patents/classification/cpc/html/us" + str(i) + "tocpc.html"
    print 'Grabbing from: ' + url + '\n'
    i += 1
    #get the table data from the page
    data = urllib.urlopen(url).read()
    #send to beautiful soup
    soup = BeautifulSoup(data)
    table = soup.find("table", width='80%')
    for tr in table.findAll('tr')[2:]:
        col = tr.findAll('td')
        uspc = col[0].get_text().encode('ascii','ignore').replace(" ","")
        cpc1 = col[1].get_text().encode('ascii','ignore').replace(" ","")
        cpc2 = col[2].get_text().encode('ascii','ignore').replace(" ","")
        cpc3 = col[3].get_text().encode('ascii','ignore').replace(" ","").replace("more...", "")
        record = uspc + ',' + cpc1 + ',' + cpc2 + ',' + cpc3 + '\n'
        print record
#file.write(record)
#file.close()
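My current suspicion (unverified; it's based on the symptom, not on inspecting the USPTO markup) is that each data row sits in its own <table width="80%">, in which case soup.find returns only the first match by design. If the diagnostic above confirms that, a sketch like this, iterating with findAll over every matching table, should pull all the rows:

import urllib
from bs4 import BeautifulSoup

url = "http://www.uspto.gov/web/patents/classification/cpc/html/us125tocpc.html"
soup = BeautifulSoup(urllib.urlopen(url).read())

# findAll returns every <table width="80%">, not just the first one.
for table in soup.findAll("table", width='80%'):
    for tr in table.findAll('tr'):
        col = tr.findAll('td')
        # Guard on the column count instead of slicing [2:], so header and
        # spacer rows are skipped no matter which table they appear in.
        if len(col) < 4:
            continue
        uspc = col[0].get_text().encode('ascii','ignore').replace(" ","")
        cpc1 = col[1].get_text().encode('ascii','ignore').replace(" ","")
        cpc2 = col[2].get_text().encode('ascii','ignore').replace(" ","")
        cpc3 = col[3].get_text().encode('ascii','ignore').replace(" ","").replace("more...", "")
        print uspc + ',' + cpc1 + ',' + cpc2 + ',' + cpc3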