2

我正在尝试自动拉取并保存 readHTMLTable 函数返回的数据框;我是一个 R 新手,无法弄清楚如何编写一个循环来自动化这个过程。如果一个一个地手动执行,这个函数是可以正常工作的。

library('XML')

urls <- c("http://www.basketball-reference.com/teams/ATL/","http://www.basketball-reference.com/teams/BOS/")
theurl <- urls[2] #Pick second link (celtics)

# Parse every HTML table on the page, then keep the one with the most rows
# (the franchise-history table on basketball-reference team pages).
tables <- readHTMLTable(theurl)
n.rows <- unlist(lapply(tables, function(t) dim(t)[1]))
BOS <- tables[[which.max(n.rows)]]
# write.csv() is called for its side effect and returns NULL invisibly,
# so the original `Team.History <- write.csv(...)` stored nothing useful.
write.csv(BOS, "Bos.csv")

任何和所有的帮助将不胜感激!

4

2 回答 2

2

我认为这结合了两个答案中最好的(并整理了一下)。

library(RCurl)
library(XML)

# Team index page; each franchise page lives under /teams/<CODE>/.
stem <- "http://www.basketball-reference.com/teams/"
# Use TRUE, not T: T is an ordinary variable and can be reassigned.
teams <- htmlParse(getURL(stem), asText = TRUE)
# Grab the href attribute of every /teams/ link, dropping the first
# (self-referential) match, then strip down to the three-letter code.
teams <- xpathSApply(teams, "//*/a[contains(@href,'/teams/')]", xmlAttrs)[-1]
teams <- gsub("/teams/(.*)/", "\\1", teams)
urls <- paste0(stem, teams)

names(teams) <- NULL   # get rid of the "href" labels left by xmlAttrs
names(urls) <- teams   # so urls can be indexed by team code below

# For each team: read all tables, keep the largest one, save it per-team,
# and collect it for a combined export. Growing a data.frame with rbind()
# inside the loop is O(n^2); accumulate in a preallocated list and bind once.
results_list <- vector("list", length(teams))
names(results_list) <- teams
for (team in teams) {
  tables <- readHTMLTable(urls[team])
  n.rows <- unlist(lapply(tables, function(t) dim(t)[1]))
  team.results <- tables[[which.max(n.rows)]]
  write.csv(team.results, file = paste0(team, ".csv"))
  team.results$TeamCode <- team   # remember which team each row came from
  results_list[[team]] <- team.results
}
results <- do.call(rbind, unname(results_list))

write.csv(results, file = "AllTeams.csv")
于 2012-08-04T01:22:00.227 回答
1

我假设你想遍历你的 urls 向量?我会尝试这样的事情:

library('XML')

url_base <- "http://www.basketball-reference.com/teams/"
teams <- c("ATL", "BOS")

# better still, get the full list of teams as in
# http://stackoverflow.com/a/11804014/1543437

# Accumulate per-team results in a preallocated list and bind once at the
# end; rbind() inside the loop is O(n^2) in the number of rows.
results_list <- vector("list", length(teams))
for (i in seq_along(teams)) {
  team <- teams[i]
  # url_base already ends with "/", so the original
  # paste(url_base, team, sep = "/") produced a doubled slash
  # ("/teams//ATL"); paste0() concatenates the pieces correctly.
  theurl <- paste0(url_base, team)
  tables <- readHTMLTable(theurl)
  n.rows <- unlist(lapply(tables, function(t) dim(t)[1]))
  team.results <- tables[[which.max(n.rows)]]   # keep the largest table
  write.csv(team.results, file = paste0(team, ".csv"))
  team.results$TeamCode <- team   # remember which team each row came from
  results_list[[i]] <- team.results
}
results <- do.call(rbind, results_list)
write.csv(results, file = "AllTeams.csv")
于 2012-08-03T22:09:55.733 回答