1

目录中有一堆文件,每行都有 json 格式的条目。文件大小从 5k 到 200MB 不等。我有这段代码可以遍历每个文件,解析我在 json 中查找的数据,最后形成一个数据框。这个脚本需要很长时间才能完成,实际上它永远不会完成。

有什么方法可以加快速度,以便我可以更快地读取文件?

代码:

library(jsonlite)
library(data.table) 

setwd("C:/Files/")

#data <- lapply(readLines("test.txt"), fromJSON)

df<-data.frame(Timestamp=factor(),Source=factor(),Host=factor(),Status=factor())
filenames <- list.files("Json_files", pattern="*.txt", full.names=TRUE)

for(i in filenames){
  print(i)
  data <- lapply(readLines(i), fromJSON)
  myDf <- do.call("rbind", lapply(data, function(d) { 
    data.frame(TimeStamp = d$payloadData$timestamp, 
               Source = d$payloadData$source, 
               Host = d$payloadData$host, 
               Status = d$payloadData$status)}))

  df<-rbind(df,myDf)

}

这是一个示例条目,但文件中有数千个这样的条目:

{"senderDateTimeStamp":"2016/04/08 10:53:18","senderHost":null,"senderAppcode":"app","senderUsecase":"appinternalstats_prod","destinationTopic":"app_appinternalstats_realtimedata_topic","correlatedRecord":false,"needCorrelationCacheCleanup":false,"needCorrelation":false,"correlationAttributes":null,"correlationRecordCount":0,"correlateTimeWindowInMills":0,"lastCorrelationRecord":false,"realtimeESStorage":true,"receiverDateTimeStamp":1460127623591,"payloadData":{"timestamp":"2016-04-08T10:53:18.169","status":"get","source":"STREAM","fund":"JVV","client":"","region":"","evetid":"","osareqid":"","basis":"","pricingdate":"","content":"","msgname":"","recipient":"","objid":"","idlreqno":"","host":"WEB01","servermember":"test"},"payloadDataText":"","key":"app:appinternalstats_prod","destinationTopicName":"app_appinternalstats_realtimedata_topic","hdfsPath":"app/appinternalstats_prod","esindex":"app","estype":"appinternalstats_prod","useCase":"appinternalstats_prod","appCode":"app"}

{"senderDateTimeStamp":"2016/04/08 10:54:18","senderHost":null,"senderAppcode":"app","senderUsecase":"appinternalstats_prod","destinationTopic":"app_appinternalstats_realtimedata_topic","correlatedRecord":false,"needCorrelationCacheCleanup":false,"needCorrelation":false,"correlationAttributes":null,"correlationRecordCount":0,"correlateTimeWindowInMills":0,"lastCorrelationRecord":false,"realtimeESStorage":true,"receiverDateTimeStamp":1460127623591,"payloadData":{"timestamp":"2016-04-08T10:53:18.169","status":"get","source":"STREAM","fund":"JVV","client":"","region":"","evetid":"","osareqid":"","basis":"","pricingdate":"","content":"","msgname":"","recipient":"","objid":"","idlreqno":"","host":"WEB02","servermember":""},"payloadDataText":"","key":"app:appinternalstats_prod","destinationTopicName":"app_appinternalstats_realtimedata_topic","hdfsPath":"app/appinternalstats_prod","esindex":"app","estype":"appinternalstats_prod","useCase":"appinternalstats_prod","appCode":"app"}

{"senderDateTimeStamp":"2016/04/08 10:55:18","senderHost":null,"senderAppcode":"app","senderUsecase":"appinternalstats_prod","destinationTopic":"app_appinternalstats_realtimedata_topic","correlatedRecord":false,"needCorrelationCacheCleanup":false,"needCorrelation":false,"correlationAttributes":null,"correlationRecordCount":0,"correlateTimeWindowInMills":0,"lastCorrelationRecord":false,"realtimeESStorage":true,"receiverDateTimeStamp":1460127623591,"payloadData":{"timestamp":"2016-04-08T10:53:18.169","status":"get","source":"STREAM","fund":"JVV","client":"","region":"","evetid":"","osareqid":"","basis":"","pricingdate":"","content":"","msgname":"","recipient":"","objid":"","idlreqno":"","host":"WEB02","servermember":""},"payloadDataText":"","key":"app:appinternalstats_prod","destinationTopicName":"app_appinternalstats_realtimedata_topic","hdfsPath":"app/appinternalstats_prod","esindex":"app","estype":"appinternalstats_prod","useCase":"appinternalstats_prod","appCode":"app"}
4

2 回答 2

3

使用“c:/tmp.txt”中的示例数据:

> df <- jsonlite::fromJSON(paste0("[",paste0(readLines("c:/tmp.txt"),collapse=","),"]"))$payloadData[c("timestamp","source","host","status")]
> df
                timestamp source  host status
1 2016-04-08T10:53:18.169 STREAM WEB01    get
2 2016-04-08T10:53:18.169 STREAM WEB02    get
3 2016-04-08T10:53:18.169 STREAM WEB02    get

因此,要调整您的代码以获取数据框列表:

dflist <- lapply(filenames, function(i) {
  jsonlite::fromJSON(
    paste0("[",
            paste0(readLines(i),collapse=","),
            "]")
  )$payloadData[c("timestamp","source","host","status")]
})

这个想法是将您的行(从readLines)转换为一个大的 json 数组,然后通过将其解析为 json 来创建数据帧。

正如 lmo 已经展示的那样,在您的 filenmaes 列表中使用 lapply 会为您提供一个数据帧列表,如果您真的只想要一个数据帧,您可以加载data.table包然后使用rbindlistondflist只获取一个数据帧。

或者,如果您记忆力不足,此线程可能会对您有所帮助。

于 2016-04-14T14:52:29.103 回答
1

一种加速方法是将for循环替换为lapplyThen drop the final rbind。这里的加速将是R不必重复复制一个越来越大的文件,df覆盖你的“一堆”文件。结果将存储在一个方便的列表中,您可以按原样使用或一次性转换为 data.frame:

# create processing function
getData <- function(i) {
  print(i)
  data <- lapply(readLines(i), fromJSON)
  myDf <- do.call("rbind", lapply(data, function(d) { 
  data.frame(TimeStamp = d$payloadData$timestamp, 
           Source = d$payloadData$source, 
           Host = d$payloadData$host, 
           Status = d$payloadData$status)}))
}

# lapply over files
myDataList <- lapply(filenames, getData)
于 2016-04-14T14:29:44.867 回答