
If I have a string:

moon <- "The cow jumped over the moon with a silver plate in its mouth" 

Is there a way to extract "moon" together with its neighbours? The neighbours could be the 2 or 3 words on either side of "moon".

So if my string is:

"The cow jumped over the moon with a silver plate in its mouth"

I would like my output to be just:

"jumped over the moon with a silver"

I know I can use str_locate if I want to extract by characters, but I'm not sure how to do this with whole words. Can this be done in R?
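
For reference, a character-based version with str_locate would look roughly like this (just a sketch; the 20-character window is arbitrary and this is exactly the problem, since it counts characters rather than words):

library(stringr)
loc <- str_locate(moon, "moon")                           # character start/end of "moon"
str_sub(moon, loc[1, "start"] - 20, loc[1, "end"] + 20)   # crude: a window of characters, not words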

Thanks and regards, Simak


3 Answers


Use strsplit:

str <- moon   # the sentence from the question
x <- strsplit(str, " ")[[1]]                          # split into words
i <- which(x == "moon")                               # position of the keyword
paste(x[seq(max(1, i - 2), min(i + 2, length(x)))], collapse = " ")
# [1] "over the moon with a"
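
A small extension of the same idea (just a sketch; the name window_around is made up here) that also copes with a keyword that is missing or occurs more than once:

window_around <- function(str, keyword = "moon", n = 2) {
  x <- strsplit(str, " ")[[1]]
  i <- which(x == keyword)                   # all positions of the keyword
  if (length(i) == 0) return(character(0))   # keyword not present
  sapply(i, function(k)
    paste(x[seq(max(1, k - n), min(k + n, length(x)))], collapse = " "))
}
window_around(moon)   # [1] "over the moon with a"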
answered 2013-08-01T00:40:17.867

Here's how I'd do it:

keyword <- "moon"
lookaround <- 2
pattern <- paste0("([[:alpha:]]+ ){0,", lookaround, "}", keyword, 
                "( [[:alpha:]]+){0,", lookaround, "}")

regmatches(str, regexpr(pattern, str))[[1]]
# [1] "The cow jumped over"

The idea: match a run of word characters followed by a space, repeated between 0 and "lookaround" (here 2) times, then the "keyword" (here "moon"), then a space followed by a run of word characters, again repeated between 0 and "lookaround" times. The regexpr function gives the start position and length of the first match of this pattern; regmatches wraps it and extracts the substring at that start/stop position.

Note: if you want to find more than one occurrence of the same pattern, regexpr can be replaced with gregexpr.
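
For example, a minimal sketch of that (the sentence str2 below is made up purely to illustrate and is not from the question):

str2 <- "the moon rose before the cow jumped over the moon"
regmatches(str2, gregexpr(pattern, str2))[[1]]     # one window per occurrence of "moon"
# [1] "the moon rose before" "over the moon"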


Here's a benchmark on bigger data comparing Hong's answer with this one:

str <- "The cow jumped over the moon with a silver plate in its mouth" 
ll <- rep(str, 1e5)
hong <- function(str) {
    str <- strsplit(str, " ")
    sapply(str, function(y) {
        i <- which(y=="moon")
        paste(y[seq(max(1, (i-2)), min((i+2), length(y)))], collapse= " ")
    })
}

arun <- function(str) {
    keyword <- "moon"
    lookaround <- 2
    pattern <- paste0("([[:alpha:]]+ ){0,", lookaround, "}", keyword, 
                    "( [[:alpha:]]+){0,", lookaround, "}")

    regmatches(str, regexpr(pattern, str))
}

require(microbenchmark)
microbenchmark(t1 <- hong(ll), t2 <- arun(ll), times=10)
# Unit: seconds
#            expr      min       lq   median       uq      max neval
#  t1 <- hong(ll) 6.172986 6.384981 6.478317 6.654690 7.193329    10
#  t2 <- arun(ll) 1.175950 1.192455 1.200674 1.227279 1.326755    10

identical(t1, t2) # [1] TRUE
answered 2013-08-01T00:48:13.223

Here's an approach using the tm package (when all you have is a hammer...):

moon <- "The cow jumped over the moon with a silver plate in its mouth"

require(tm)
my.corpus <- Corpus(VectorSource(moon))
# Tokenizer for n-grams and passed on to the term-document matrix constructor
library(RWeka)
neighborhood  <- 3  # how many words either side of the word of interest
neighborhood1 <- 1 + neighborhood * 2  # n-gram length: the word itself plus 'neighborhood' words on each side
ngramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = neighborhood1, max = neighborhood1))
dtm <- TermDocumentMatrix(my.corpus, control = list(tokenize = ngramTokenizer))
inspect(dtm)

#  find ngrams that have the word of interest in them
word <- 'moon'
subset_ngrams <- dtm$dimnames$Terms[grep(word, dtm$dimnames$Terms)]

# keep only ngrams with the word of interest in the middle. This
# removes duplicates and lets us see what's on either side
# of the word of interest

subset_ngrams <- subset_ngrams[sapply(subset_ngrams, function(i) {
  tmp <- unlist(strsplit(i, split = " "))
  tmp <- tmp[length(tmp) - neighborhood]   # pick the centre word of each n-gram
  tmp} == word)]

# inspect output
subset_ngrams
[1] "jumped over the moon with a silver plate"
answered 2013-08-01T03:34:24.167