Got it.
library(text2vec)
# create the GloVe model; glove_pruned_vocab and glove_tcm are the pruned
# vocabulary and term co-occurrence matrix built earlier (note: newer text2vec
# versions use `rank =` in place of `word_vectors_size =`)
glove_model <- GlobalVectors$new(word_vectors_size = 50,
                                 vocabulary = glove_pruned_vocab,
                                 x_max = 20L)
# fit the model and get the word vectors
word_vectors_main <- glove_model$fit_transform(glove_tcm, n_iter = 20,
                                               convergence_tol = -1)
word_vectors_context <- glove_model$components
# sum the main and context vectors, as recommended for GloVe
word_vectors <- word_vectors_main + t(word_vectors_context)
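If glove_pruned_vocab and glove_tcm are not already in scope, here is a minimal sketch of how they are typically built with text2vec (the variable name `texts` for the raw documents is an assumption):
# minimal sketch, assuming `texts` is a character vector of documents
tokens <- word_tokenizer(tolower(texts))
it <- itoken(tokens, progressbar = FALSE)
glove_vocab <- create_vocabulary(it)
glove_pruned_vocab <- prune_vocabulary(glove_vocab, term_count_min = 5)
vectorizer <- vocab_vectorizer(glove_pruned_vocab)
# term co-occurrence matrix over a symmetric 5-word window
glove_tcm <- create_tcm(it, vectorizer, skip_grams_window = 5L)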
After creating the word embeddings, build an index that maps words (strings) to their vector representations (numeric vectors).
# `lines` is assumed to hold the rows of a GloVe embeddings text file,
# e.g. lines <- readLines("glove.6B.50d.txt"), with one word followed by
# its coefficients per line
embeddings_index <- new.env(parent = emptyenv())
for (line in lines) {
  values <- strsplit(line, ' ', fixed = TRUE)[[1]]
  word <- values[[1]]
  coefs <- as.numeric(values[-1])
  embeddings_index[[word]] <- coefs
}
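If you are using the vectors trained with text2vec above rather than reading a downloaded GloVe file, the same index can be filled directly from the rows of word_vectors; a minimal sketch:
# rownames(word_vectors) are the vocabulary terms
embeddings_index <- new.env(parent = emptyenv())
for (word in rownames(word_vectors)) {
  embeddings_index[[word]] <- word_vectors[word, ]
}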
Next, build an embedding matrix of shape (max_words, embedding_dim) that can be loaded into an embedding layer.
embedding_dim <- 50  # number of dimensions used to represent each word
embedding_matrix <- array(0, c(max_words, embedding_dim))
for (word in names(word_index)) {
  index <- word_index[[word]]
  if (index < max_words) {
    embedding_vector <- embeddings_index[[word]]
    if (!is.null(embedding_vector)) {
      # R is 1-based, so a word with Keras index i goes in row i + 1;
      # words not found in the embedding index stay all-zero
      embedding_matrix[index + 1, ] <- embedding_vector
    }
  }
}
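It is worth checking how much of the vocabulary actually received a pretrained vector, since unmatched words stay all-zero; a quick sanity check on the objects above:
# rows that remained all zeros are words missing from the embedding index
n_covered <- sum(rowSums(abs(embedding_matrix)) > 0)
cat(n_covered, "of", max_words, "words have pretrained vectors\n")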
We can then load this embedding matrix into the embedding layer, build a model, and generate predictions.
model_pretrained <- keras_model_sequential() %>%
  # input_length (`maxlen`, the padded sequence length) is needed so that
  # layer_flatten knows the size of its output
  layer_embedding(input_dim = max_words, output_dim = embedding_dim,
                  input_length = maxlen) %>%
  layer_flatten() %>%
  layer_dense(units = 32, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")
summary(model_pretrained)
# load the GloVe embeddings into the first (embedding) layer and freeze it,
# so the pretrained weights are not updated during training
get_layer(model_pretrained, index = 1) %>%
  set_weights(list(embedding_matrix)) %>%
  freeze_weights()
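To confirm the layer is frozen, you can inspect its trainable flag (freezing takes effect once the model is compiled, which the next step does):
get_layer(model_pretrained, index = 1)$trainable  # should now be FALSE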
model_pretrained %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)
history <- model_pretrained %>% fit(
  x_train, y_train,
  validation_data = list(x_val, y_val),
  epochs = num_epochs,
  batch_size = 32
)
Predictions can then be generated with the standard predict function.
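For example (a minimal sketch; `x_test` is assumed to be an integer matrix padded the same way as the training data):
preds <- model_pretrained %>% predict(x_test)  # probabilities in [0, 1]
pred_classes <- as.integer(preds > 0.5)        # hard 0/1 labels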
Check the following links:
Building a model with word embeddings in Keras
Pretrained word embeddings