-1

当我运行我的 topicmodel 代码时,发生了一个非常奇怪的错误。基本上我有一个带有用户评论的 .csv 文件。我想创建一个 dtm,每个评论都是一个文档。我抽取了 8k 条评论的样本,并在上面使用了以下代码:

> #LOAD LIBRARYS
> 
> library(tm)
> library(SnowballC)
> library(stringr)
> library(tictoc)
> tic()
> 
> #SET FILE LOCATION
> file_loc <- "C:/Users/Andreas/Desktop/first8k.csv"
> 
> #LOAD DOCUMENTS
> Database <- read.csv(file_loc, header = FALSE)
> require(tm)
> 
> #PROCEED
> Database <- Corpus(DataframeSource(Database))
> 
> Database <-tm_map(Database,content_transformer(tolower))
> 
> 
> Database <- tm_map(Database, removePunctuation)
> Database <- tm_map(Database, removeNumbers)
> Database <- tm_map(Database, removeWords, stopwords("english"))
> Database <- tm_map(Database, stripWhitespace)
> 
> 
> myStopwords <- c("some", "individual", "stop","words")
> Database <- tm_map(Database, removeWords, myStopwords)
> 
> Database <- tm_map(Database,stemDocument) 
> 
> 
> dtm <- DocumentTermMatrix(Database,          control=list(minDocFreq=2,minWordLength=2))
> 
> row_total = apply(dtm, 1, sum)
> dtm.new = dtm[row_total>0,]
> 
> removeSparseTerms( dtm, .99)
>
>>Outcome:DocumentTermMatrix (documents: 12753, terms: 194)
>Non-/sparse entries: 66261/2407821
>Sparsity           : 97%
>Maximal term length: 11
>Weighting          : term frequency (tf)
> 
> #TOPICMODELLING
> 
> library(topicmodels)
> 
>  
> 
> burnin <- 100
> iter <- 500
> thin <- 100
> seed <-list(200,5,500,3700,1666)
> nstart <- 5
> best <- TRUE
> 
>  
> k <- 12
> 
>
> ldaOut <-LDA(dtm.new,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin))
> 

所以这个工作得很好。如果我再取 8k 评论样本,还有 csv 文件、相同格式等,则会发生以下错误:

> library(tm)
> library(SnowballC)
> library(stringr)
> library(tictoc)
> tic()
> 
> #SET FILE LOCATION
> file_loc <- "C:/Users/Andreas/Desktop/try8k.csv"
> 
> #LOAD DOCUMENTS
> Database <- read.csv(file_loc, header = FALSE)
> require(tm)
> 
> #PROCEED
> Database <- Corpus(DataframeSource(Database))
> 
> Database <-tm_map(Database,content_transformer(tolower))
> 
> 
> Database <- tm_map(Database, removePunctuation)
> Database <- tm_map(Database, removeNumbers)
> Database <- tm_map(Database, removeWords, stopwords("english"))
> Database <- tm_map(Database, stripWhitespace)
> 
> 
> myStopwords <- c("some", "individual", "stop","words")
> Database <- tm_map(Database, removeWords, myStopwords)
> 
> Database <- tm_map(Database,stemDocument) 
> 
> dtm <- DocumentTermMatrix(Database,control=list(minDocFreq=2,minWordLength=2))
> 
> row_total = apply(dtm, 1, sum)
> dtm.new = dtm[row_total>0,]
> 
> removeSparseTerms( dtm, .99)
>
>>Outcome:DocumentTermMatrix (documents: 9875, terms: 0)
Non-/sparse entries: 0/0
Sparsity           : 100%
Maximal term length: 0
Weighting          : term frequency (tf)
> 
> #TOPICMODELLING
> 
> library(topicmodels)
> 
>  
> 
> burnin <- 100
> iter <- 500
> thin <- 100
> seed <-list(200,5,500,3700,1666)
> nstart <- 5
> best <- TRUE
> 
>  
> k <- 12
> 
> 
> ldaOut <-LDA(dtm.new,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin))

>Fehler in obj[[i]][[which.max(sapply(obj[[i]], logLik))]] :
>attempt to select less than one element in get1index

我猜 dtm 的某些东西没有起作用,因为它说有 9875 个文档但根本没有条款。但我完全不知道为什么这些代码适用于一个样本而不适用于另一个样本。请告诉我我是否在代码上做错了什么,或者您是否发现了任何其他错误。

提前致谢!

4

1 回答 1

-1

terms = 0 这就是为什么你有概率

于 2017-02-22T14:39:52.103 回答