
I am using word2vec to represent a short phrase (3 to 4 words) as a single vector, either by adding the individual word embeddings or by averaging them.

From the experiments I have run, both approaches always give me the same cosine similarity. I suspect this has to do with the word vectors produced by word2vec being normalized to unit (Euclidean) length after training? Or there is a bug in my code, or I am missing something.
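
A quick way to test that suspicion would be to print the norms of a few individual word vectors; this is a minimal sketch using the same model file and old-style gensim API as the code below (norms far from 1.0 would mean the raw vectors are not unit length):

from numpy.linalg import norm
from gensim.models import Word2Vec

# same model file and old gensim API as in the code below
word2vec = Word2Vec.load_word2vec_format("/data/word2vec/vectors_200.bin", binary=True)
for w in ['founder', 'ceo', 'chairman']:
    print(norm(word2vec[w]))  # values far from 1.0 mean the vectors are not unit-normalized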

Here is the code:

import numpy as np
from nltk import PunktWordTokenizer
from gensim.models import Word2Vec
from numpy.linalg import norm
from scipy.spatial.distance import cosine

def pattern2vector(tokens, word2vec, AVG=False):
    # represent a phrase as the sum (or, if AVG, the average) of its word vectors
    pattern_vector = np.zeros(word2vec.layer1_size)
    n_words = 0
    if len(tokens) > 1:
        for t in tokens:
            try:
                vector = word2vec[t.strip()]
                pattern_vector = np.add(pattern_vector, vector)
                n_words += 1
            except KeyError:
                # skip out-of-vocabulary tokens
                continue
        if AVG is True and n_words > 0:  # guard against dividing by zero
            pattern_vector = np.divide(pattern_vector, n_words)
    elif len(tokens) == 1:
        try:
            pattern_vector = word2vec[tokens[0].strip()]
        except KeyError:
            pass
    return pattern_vector


def main():
    print "Loading word2vec model ...\n"
    word2vecmodelpath = "/data/word2vec/vectors_200.bin"
    word2vec = Word2Vec.load_word2vec_format(word2vecmodelpath, binary=True)
    pattern_1 = 'founder and ceo'
    pattern_2 = 'co-founder and former chairman'

    tokens_1 = PunktWordTokenizer().tokenize(pattern_1)
    tokens_2 = PunktWordTokenizer().tokenize(pattern_2)
    print "vec1", tokens_1
    print "vec2", tokens_2

    p1 = pattern2vector(tokens_1, word2vec, False)
    p2 = pattern2vector(tokens_2, word2vec, False)
    print "\nSUM"
    print "dot(vec1,vec2)", np.dot(p1,p2)
    print "norm(p1)", norm(p1)
    print "norm(p2)", norm(p2)
    print "dot((norm)vec1,norm(vec2))", np.dot(norm(p1),norm(p2))
    print "cosine(vec1,vec2)",     np.divide(np.dot(p1,p2),np.dot(norm(p1),norm(p2)))
    print "\n"
    print "AVG"
    p1 = pattern2vector(tokens_1, word2vec, True)
    p2 = pattern2vector(tokens_2, word2vec, True)
    print "dot(vec1,vec2)", np.dot(p1,p2)
    print "norm(p1)", norm(p1)
    print "norm(p2)", norm(p2)
    print "dot(norm(vec1),norm(vec2))", np.dot(norm(p1),norm(p2))
    print "cosine(vec1,vec2)",     np.divide(np.dot(p1,p2),np.dot(norm(p1),norm(p2)))


if __name__ == "__main__":
    main()

Here is the output:

Loading word2vec model ...

Dimensions 200
vec1 ['founder', 'and', 'ceo']
vec2 ['co-founder', 'and', 'former', 'chairman']

SUM
dot(vec1,vec2) 5.4008677771
norm(p1) 2.19382594282
norm(p2) 2.87226958166
dot(norm(vec1),norm(vec2)) 6.30125952303
cosine(vec1,vec2) 0.857109242583


AVG
dot(vec1,vec2) 0.450072314758
norm(p1) 0.731275314273
norm(p2) 0.718067395416
dot(norm(vec1),norm(vec2)) 0.525104960252
cosine(vec1,vec2) 0.857109242583

I am using cosine similarity as defined here: Cosine Similarity (Wikipedia), i.e. cos(u, v) = dot(u, v) / (norm(u) * norm(v)). The norms and the dot products are indeed different.

Can anyone explain why the cosine is the same?

Thank you, David


1 Answer


Cosine measures the angle between two vectors and takes no account of either vector's length. When you divide by the number of words in the phrase, you are only shortening the vector, not changing its angular position: for positive scalars a and b, cos(a*u, b*v) = (a*b * dot(u, v)) / (a*b * norm(u) * norm(v)) = cos(u, v), so the scaling factors cancel. Your results look correct to me.
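
You can also see this directly in the output above: the AVG norms are exactly the SUM norms divided by the word counts (2.19382594282 / 3 = 0.731275314273 and 2.87226958166 / 4 = 0.718067395416), and the AVG dot product is the SUM dot product divided by 3 * 4 = 12, so the factors cancel in the ratio. Here is a minimal, self-contained sketch (plain NumPy, with random vectors standing in for the phrase vectors) demonstrating the scale invariance:

import numpy as np
from numpy.linalg import norm

def cosine_sim(u, v):
    # cosine similarity: dot product over the product of the norms
    return np.dot(u, v) / (norm(u) * norm(v))

np.random.seed(0)
u = np.random.randn(200)  # stands in for the summed 3-word phrase vector
v = np.random.randn(200)  # stands in for the summed 4-word phrase vector

# averaging divides each sum by its word count (3 and 4 here); the positive
# scalars cancel in the cosine, so both lines print the same value
print(cosine_sim(u, v))
print(cosine_sim(u / 3.0, v / 4.0))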

answered 2015-05-10T11:12:38.793