0

print(news['title'][5]) 秘鲁-厄瓜多尔边境地区发生7.5级地震

print(analyser.polarity_scores(news['title'][5])) {'neg':0.0,'neu':1.0,'pos':0.0,'compound':0.0}

from nltk.tokenize import word_tokenize, RegexpTokenizer

import pandas as pd

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer


analyzer = SentimentIntensityAnalyzer()


sentence = news['title'][5]

tokenized_sentence = nltk.word_tokenize(sentence)
pos_word_list=[]
neu_word_list=[]
neg_word_list=[]

for word in tokenized_sentence:
    if (analyzer.polarity_scores(word)['compound']) >= 0.1:
        pos_word_list.append(word)
    elif (analyzer.polarity_scores(word)['compound']) <= -0.1:
        neg_word_list.append(word)
    else:
        neu_word_list.append(word)                

print('Positive:',pos_word_list)
print('Neutral:',neu_word_list)
print('Negative:',neg_word_list) 
score = analyzer.polarity_scores(sentence)
print('\nScores:', score)

正面:[] 中性:['Magnitude', '7.5', 'quake', 'hits', 'Peru-Ecuador', 'border', 'region', '-', 'The', 'Hindu'] 负面:[]

分数:{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}

new_words = {
    'Peru-Ecuador': -2.0,
    'quake': -3.4,
}

analyser.lexicon.update(new_words)
print(analyzer.polarity_scores(sentence))

{'neg':0.0,'neu':1.0,'pos':0.0,'compound':0.0}

from nltk.tokenize import word_tokenize, RegexpTokenizer

import pandas as pd

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer


analyzer = SentimentIntensityAnalyzer()


sentence = news['title'][5]

tokenized_sentence = nltk.word_tokenize(sentence)
pos_word_list=[]
neu_word_list=[]
neg_word_list=[]

for word in tokenized_sentence:
    if (analyzer.polarity_scores(word)['compound']) >= 0.1:
        pos_word_list.append(word)
    elif (analyzer.polarity_scores(word)['compound']) <= -0.1:
        neg_word_list.append(word)
    else:
        neu_word_list.append(word)                

print('Positive:',pos_word_list)
print('Neutral:',neu_word_list)
print('Negative:',neg_word_list) 
score = analyzer.polarity_scores(sentence)
print('\nScores:', score)

正面:[] 中性:['Magnitude', '7.5', 'quake', 'hits', 'Peru-Ecuador', 'border', 'region', '-', 'The', 'Hindu'] 负面:[]

分数:{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}

4

1 回答 1

4

您使用的代码绝对没问题。更新您使用的字典analyser而不是analyzer(不知道为什么您没有收到错误)。

new_words = {
    'Peru-Ecuador': -2.0,
    'quake': -3.4,
}
​
analyzer.lexicon.update(new_words)
print(analyzer.polarity_scores(sentence))

输出:

{'neg': 0.355, 'neu': 0.645, 'pos': 0.0, 'compound': -0.6597}

还有一点要注意(不确定您是否犯了这个错误。)您不应该再次导入该库。因为您更新的单词将消失。步骤应该是:

  1. 导入库和字典
  2. 更新字典(此步骤后您不应再次导入库)
  3. 计算情绪分数
于 2019-03-22T10:03:43.043 回答