First of all, the way you import your libraries is rather messy, and there are quite a few unused imports. After a bit of googling, let's trim the imports down and stick with these:
from itertools import chain
from collections import defaultdict
import nltk
from nltk.classify import MaxentClassifier, accuracy
from nltk.corpus import movie_reviews
Then I found out that `featx` is some example module that Jacob Perkins uses in his book; here is a better source for it (https://github.com/sophist114/Python/blob/master/EmotionAnalysis.py). So let's have a documented version of it here, with some explanation of what the functions do:
def bag_of_words(words):
"""
Change a document into a BOW feature vector represented by a dict object.
"""
return dict([(word, True) for word in words])
def label_feats_from_corpus(corp, feature_detector=bag_of_words):
"""
    Change the corpus into a feature matrix. Sometimes the process is
    known as vectorization. The default is to use BOW features.
"""
label_feats = defaultdict(list)
for label in corp.categories():
for fileid in corp.fileids(categories=[label]):
feats = feature_detector(corp.words(fileids=[fileid]))
label_feats[label].append(feats)
return label_feats
def split_label_feats(lfeats, split=0.75):
"""
    Splits the corpus features into a train and a test portion.
    This function is meant to be used after `label_feats_from_corpus`.
"""
train_feats = []
test_feats = []
for label, feats in lfeats.iteritems():
cutoff = int(len(feats) * split)
train_feats.extend([(feat, label) for feat in feats[:cutoff]])
test_feats.extend([(feat, label) for feat in feats[cutoff:]])
return train_feats, test_feats
Now let's get to the process of training and testing the model. First, feature extraction:
# Extract features from corpus and for each document label it with the appropriate labels.
label_feats = label_feats_from_corpus(movie_reviews)
Let's see what we get after calling `label_feats_from_corpus`:
for label in label_feats:
for document in label_feats[label]:
print label, document
break
break
[out]:
neg {u'all': True, u'concept': True, u'skip': True, u'go': True, u'seemed': True, u'suits': True, u'presents': True, u'to': True, u'sitting': True, u'very': True, u'horror': True, u'continues': True, u'every': True, u'exact': True, u'cool': True, u'entire': True, u'did': True, u'dig': True, u'flick': True, u'neighborhood': True, u'crow': True, u'street': True, u'video': True, u'further': True, u'even': True, u'what': True, u'hide': True, u'giving': True, u'new': True, u'ever': True, u'here': True, u'understanding': True, u'entertain': True, u'studio': True, u'others': True, u'kudos': True, u'weird': True, u'makes': True, u'explained': True, u'rarely': True, u'plot': True, u'fed': True, u'disappearances': True, u'from': True, u'would': True, u'&': True, u'two': True, u'music': True, u'films': True, u'themselves': True, u'until': True, u'more': True, u'teen': True, u'clue': True, u'stick': True, u'given': True, u'me': True, u'this': True, u'package': True, u'movies': True, u'making': True, u'my': True, u'give': True, u'fuck': True, u'want': True, u'sense': True, u'!': True, u'holds': True, u'write': True, u'how': True, u'hot': True, u'stir': True, u'okay': True, u'beauty': True, u'mess': True, u'overall': True, u'after': True, u'coming': True, u'such': True, u'guys': True, u'types': True, u'a': True, u'downshifts': True, u'chasing': True, u'redundant': True, u'so': True, u'enter': True, u'playing': True, u'executed': True, u'over': True, u'insight': True, u'years': True, u'still': True, u'its': True, u'before': True, u'thrilling': True, u'somewhere': True, u',': True, u'actually': True, u'meantime': True, u'production': True, u'main': True, u'might': True, u'then': True, u'good': True, u'break': True, u'they': True, u'half': True, u'not': True, u'now': True, u'always': True, u'didn': True, u'arrow': True, u'mean': True, u'bentley': True, u'generation': True, u'idea': True, u'engaging': True, u'happen': True, u'out': True, u"'": True, u'since': True, u'7': True, u'got': True, u'highway': True, u'shows': True, u'blair': True, u'turning': True, u'little': True, u'completely': True, u'shelves': True, u'starts': True, u'terribly': True, u'american': True, u'jumbled': True, u'chopped': True, u'one': True, u'fantasy': True, u'visions': True, u'guess': True, u'"': True, u'2': True, u'too': True, u'wrapped': True, u'final': True, u'slasher': True, u'that': True, u'explanation': True, u'took': True, u'part': True, u'attempt': True, u'10': True, u'kind': True, u'scenes': True, u'feeling': True, u'and': True, u'mind': True, u'sad': True, u'have': True, u'need': True, u'seem': True, u'apparently': True, u'-': True, u'also': True, u'which': True, u'sure': True, u'normal': True, u'who': True, u'most': True, u'don': True, u'drive': True, u'ways': True, u'entertaining': True, u'review': True, u'came': True, u'ending': True, u'find': True, u'touches': True, u'craziness': True, u'(': True, u'should': True, u'only': True, u'going': True, u'pretty': True, u'joblo': True, u'folks': True, u'8': True, u'do': True, u'his': True, u'get': True, u'watch': True, u'feels': True, u'despite': True, u'him': True, u'bad': True, u'where': True, u'lazy': True, u'see': True, u'decided': True, u'are': True, u'sorta': True, u'movie': True, u'nightmare': True, u'3': True, u'unravel': True, u'melissa': True, u'correctly': True, u'flicks': True, u'we': True, u'packaged': True, u'nightmares': True, u'genre': True, u'20': True, u'memento': True, u'both': True, u'accident': True, u's': True, u'witch': True, u'point': True, 
u'character': True, u'whatever': True, u'tons': True, u'simply': True, u'church': True, u'throughout': True, u'decent': True, u'been': True, u'.': True, u'secret': True, u'life': True, u'kids': True, u'personally': True, u'look': True, u'these': True, u'plain': True, u'harder': True, u'apparitions': True, u'while': True, u'neat': True, u've': True, u'is': True, u'it': True, u'couples': True, u'someone': True, u'in': True, u'chase': True, u'different': True, u')': True, u'things': True, u'make': True, u'same': True, u'member': True, u'strange': True, u'9': True, u'party': True, u'applaud': True, u'drink': True, u'director': True, u'running': True, u'characters': True, u'off': True, u'i': True, u'salvation': True, u'well': True, u'obviously': True, u'edge': True, u'echoes': True, u'the': True, u'away': True, u'just': True, u'generally': True, u'elm': True, u'excites': True, u'seems': True, u'snag': True, u'wes': True, u'4': True, u'has': True, u'big': True, u'showing': True, u'five': True, u'know': True, u'world': True, u'bit': True, u'password': True, u'dreams': True, u'like': True, u'lost': True, u'audience': True, u't': True, u'looooot': True, u'because': True, u'deal': True, u'people': True, u'back': True, u'dead': True, u'unraveling': True, u'critique': True, u'confusing': True, u'for': True, u'bottom': True, u'/': True, u'does': True, u'assuming': True, u'?': True, u'be': True, u'although': True, u'by': True, u'on': True, u'about': True, u'oh': True, u'of': True, u'runtime': True, u'or': True, u'own': True, u'strangeness': True, u'into': True, u'down': True, u'your': True, u'her': True, u'there': True, u'start': True, u'way': True, u'biggest': True, u':': True, u'head': True, u'offering': True, u'but': True, u'taken': True, u'line': True, u'trying': True, u'with': True, u'he': True, u'up': True, u'us': True, u'problem': True, u'minutes': True, u'figured': True, u'doesn': True, u'an': True, u'as': True, u'girlfriend': True, u'mold': True, u'sagemiller': True, u'film': True, u'again': True, u'no': True, u'when': True, u'actors': True, u'you': True, u'really': True, u'dies': True, u'problems': True, u'ago': True}
So we get a document with the `neg` label, and for every word in the document the feature value is True. For now, each document only contains the features (i.e. the words) that actually occur in it.
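To make that concrete, here is a tiny sketch with a made-up mini-document, showing that only the words occurring in the document end up as keys in the featureset:

# Made-up mini-document, just to illustrate the BOW featureset:
feats = bag_of_words(['the', 'plot', 'was', 'confusing'])
print feats                 # e.g. {'the': True, 'plot': True, 'was': True, 'confusing': True} (dict order may vary)
print 'brilliant' in feats  # False, words that are absent from the document are simply not recorded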
Let's go on:
# Let's split the data up into train and test.
train_documents, test_documents = split_label_feats(label_feats)
Now we see that `split_label_feats` changes the key-value structure, such that each iteration over `train_documents` gives us a document as a (features, label) tuple:
for features, label in train_documents:
    print label, features
break
print len(train_documents)
print len(test_documents)
# Get the number of documents in movie_review corpus
num_docs_in_corpus = len(list(chain(*[movie_reviews.fileids(categories=[cat]) for cat in movie_reviews.categories()])))
print len(train_documents) + len(test_documents) == num_docs_in_corpus
[out]:
1500
500
True
So it seems that the error can only have been caused by your last two lines of code, i.e. when you run:
# To train the tagger.
me_classifier = nltk.MaxentClassifier.train(train_documents, algorithm='iis', trace=0, max_iter=3)
You do get those warnings, but note that the code is still building the model!!!! So they are only warnings, caused by underflow; see What are arithmetic underflow and overflow in C?
Building the classifier takes a while, but don't panic: wait for it to finish and do NOT ctrl + c to kill the python process. If you do kill the process, you will see this:
Training stopped: keyboard interrupt
So let's understand why the warnings appear. Four warnings are given:
/usr/local/lib/python2.7/dist-packages/nltk/classify/maxent.py:1306: RuntimeWarning: overflow encountered in power
exp_nf_delta = 2 ** nf_delta
/usr/local/lib/python2.7/dist-packages/nltk/classify/maxent.py:1308: RuntimeWarning: invalid value encountered in multiply
sum1 = numpy.sum(exp_nf_delta * A, axis=0)
/usr/local/lib/python2.7/dist-packages/nltk/classify/maxent.py:1309: RuntimeWarning: invalid value encountered in multiply
sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)
/usr/local/lib/python2.7/dist-packages/nltk/classify/maxent.py:1315: RuntimeWarning: invalid value encountered in divide
deltas -= (ffreq_empirical - sum1) / -sum2
They all point to the same function that is used to calculate the deltas in NLTK's maxent implementation, i.e. https://github.com/nltk/nltk/blob/develop/nltk/classify/maxent.py#L1208. And you will find that this delta calculation is specific to the IIS (Improved Iterative Scaling) algorithm.
At this point you need some understanding of machine learning and supervised learning, see https://en.wikipedia.org/wiki/Supervised_learning
To answer your question: the warnings merely indicate that the deltas are hard to compute at some point, but they are still reasonable to handle, most probably because there are some super small values when computing the deltas. The algorithm IS running. It is not hanging, it is training.
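If you want to reproduce that kind of warning in isolation, here is a tiny sketch (the numbers are made up; they just push 2**x past what a 64-bit float can hold), mimicking what happens inside the delta computation:

import numpy
# Made-up values: 2**1100 exceeds the largest 64-bit float, 2**-1100 underflows to 0.
nf_delta = numpy.array([1100.0, -1100.0])
exp_nf_delta = 2 ** nf_delta   # RuntimeWarning: overflow encountered in power
print exp_nf_delta             # [ inf   0.]
print exp_nf_delta * 0         # inf * 0 = nan -> RuntimeWarning: invalid value encountered in multiply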
To appreciate the neat implementation of MaxEnt in NLTK, I suggest you go through this course https://www.youtube.com/playlist?list=PL6397E4B26D00A269 or, for a more hardcore machine learning course, see https://www.coursera.org/course/ml
Training a classifier takes time and computing power; after waiting long enough, you should see that it does finish:
print accuracy(me_classifier, test_documents)
[out]:
0.5
You can see that the accuracy is bad, as expected, since the delta calculations blew up; 0.5 is just your baseline. With the courses listed above, you should be able to produce better classifiers once you know how they come about and how to tune them.
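As a quick sanity check that the pipeline can beat the baseline, two low-effort tweaks you could try are letting IIS run for more than 3 iterations, or switching to the GIS algorithm. This is only a sketch; the settings below are guesses, not guaranteed improvements:

# Sketch: give the optimizer more iterations than max_iter=3 (slower, but usually better).
me_classifier = nltk.MaxentClassifier.train(train_documents, algorithm='iis',
                                            trace=0, max_iter=30)
# Or sketch: try Generalized Iterative Scaling instead of IIS.
me_classifier = nltk.MaxentClassifier.train(train_documents, algorithm='gis',
                                            trace=0, max_iter=10)
print accuracy(me_classifier, test_documents)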
On a side note, do remember to pickle your classifier so that you don't have to retrain it the next time; see Save Naive Bayes Trained Classifier in NLTK and Pickled trained classifier produces different results from results obtained directly from a newly but identically trained classifier.
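A minimal pickling sketch (the filename here is arbitrary):

import pickle
# Save the trained classifier to disk...
with open('me_classifier.pickle', 'wb') as fout:
    pickle.dump(me_classifier, fout)
# ... and later load it back instead of retraining.
with open('me_classifier.pickle', 'rb') as fin:
    me_classifier = pickle.load(fin)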
Here's the full code:
from itertools import chain
from collections import defaultdict
import nltk
from nltk.classify import MaxentClassifier, accuracy
from nltk.corpus import movie_reviews
def bag_of_words(words):
"""
Change a document into a BOW feature vector represented by a dict object.
"""
return dict([(word, True) for word in words])
def label_feats_from_corpus(corp, feature_detector=bag_of_words):
"""
    Change the corpus into a feature matrix. Sometimes the process is
    known as vectorization. The default is to use BOW features.
"""
label_feats = defaultdict(list)
for label in corp.categories():
for fileid in corp.fileids(categories=[label]):
feats = feature_detector(corp.words(fileids=[fileid]))
label_feats[label].append(feats)
return label_feats
def split_label_feats(lfeats, split=0.75):
"""
    Splits the corpus features into a train and a test portion.
    This function is meant to be used after `label_feats_from_corpus`.
"""
train_feats = []
test_feats = []
for label, feats in lfeats.iteritems():
cutoff = int(len(feats) * split)
train_feats.extend([(feat, label) for feat in feats[:cutoff]])
test_feats.extend([(feat, label) for feat in feats[cutoff:]])
return train_feats, test_feats
# Extract features from corpus and for each document label it with the appropriate labels.
label_feats = label_feats_from_corpus(movie_reviews)
'''
for label in label_feats:
for document in label_feats[label]:
print label, document
break
break
'''
# Let's split the data up into train and test.
train_documents, test_documents = split_label_feats(label_feats)
'''
# Now we see that the `split_label_feats` change the key value structure such that each iteration of train_feats gives us a document with a tuple of the (features, label)
for features, label in train_documents:
print label, features
break
print len(train_documents)
print len(test_documents)
# Get the number of documents in movie_review corpus
num_docs_in_corpus = len(list(chain(*[movie_reviews.fileids(categories=[cat]) for cat in movie_reviews.categories()])))
print len(train_documents) + len(test_documents) == num_docs_in_corpus
'''
# To train the tagger.
me_classifier = nltk.MaxentClassifier.train(train_documents, algorithm='iis', trace=0, max_iter=3)
print accuracy(me_classifier, test_documents)
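Once training has finished you can also poke at the model directly, e.g. classify a new document with the same `bag_of_words` feature extractor and look at the strongest features. The review text below is made up purely for illustration:

new_review = 'a thrilling plot and a brilliant ending'.split()
print me_classifier.classify(bag_of_words(new_review))   # prints 'pos' or 'neg'
me_classifier.show_most_informative_features(10)         # top 10 weighted features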