I'm using the multinomial Naive Bayes model, and I'm supposed to follow the pseudocode shown inside the train method below. These are my questions:
1) I have most of the code in place, but I'm stuck mainly on extracting the vocabulary, counting the number of documents in each class, and concatenating the text of all documents in a class.
2) I also noticed that the train method I need only takes the documents (a.k.a. train_doc), so I don't know how to adjust it to get the classes C.
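For 2), my current thinking is that C doesn't have to be passed in at all: it could be recovered from the training documents themselves, assuming each document carries its own label. A minimal sketch (the 'label' and 'tokens' keys are hypothetical names standing in for whatever my real document format uses):

def extract_classes(documents):
    # C is just the set of distinct labels seen in the training documents
    return set(d['label'] for d in documents)

Here is my attempt at train so far: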
# needs at module level: from collections import Counter, defaultdict; import math
def train(self, documents):
    """
    TRAINMULTINOMIALNB(C, D)
    1  V <-- EXTRACTVOCABULARY(D)
    2  N <-- COUNTDOCS(D)
    3  for each c in C
    4  do Nc <-- COUNTDOCSINCLASS(D, c)
    5     prior[c] <-- Nc / N
    6     textc <-- CONCATENATETEXTOFALLDOCSINCLASS(D, c)
    7     for each t in V
    8     do Tct <-- COUNTTOKENSOFTERM(textc, t)
    9     for each t in V
    10    do condprob[t][c] <-- (Tct + 1) / (sum_t'(Tct' + 1))
    11 return V, prior, condprob
    """
    prior = {}
    N = len(documents)                    # 2: COUNTDOCS(D)
    # 1: EXTRACTVOCABULARY(D) -- collect every term seen in any document
    V = Counter()
    for d in documents:
        V.update(d[***])                  # *** is my placeholder for the field holding the doc's terms
    # 4: COUNTDOCSINCLASS(D, c) -- count how many documents fall in each class
    cdic = Counter()
    for d2 in documents:
        for label in C:
            cdic.update({label: int(math.ceil(float(d2[***])))})
    # 6: CONCATENATETEXTOFALLDOCSINCLASS(D, c) -- merge the term counts of every doc in class c
    ctoadic = defaultdict(Counter)
    for d3 in documents:
        for label2 in C:
            if float(***) > 0:
                ctoadic[label2].update(d3[***])
    # invert ctoadic: for each term, its count in every class it occurs in
    tii = defaultdict(Counter)
    for label, words in ctoadic.iteritems():
        for w in words:
            tii[w].update({label: words[w]})
    # 8-10: getCondProb -- Laplace-smoothed condprob[t][c]
    gcp = defaultdict(lambda: defaultdict(float))
    tnw = {}                              # total number of tokens in each class
    for l, v in ctoadic.iteritems():
        tnw[l] = sum(v.values())
    for w, count in tii.iteritems():
        # classes where this term never occurs still get the smoothed zero-count estimate
        missing = [c for c in C if c not in count]
        for ling in missing:
            gcp[w][ling] = 1.0 / (len(ctoadic[ling]) + tnw[ling])
        for ling, val in count.iteritems():
            gcp[w][ling] = float(val + 1) / (len(ctoadic[ling]) + tnw[ling])
    # 5: prior[c] <-- Nc / N
    for c in C:
        prior[c] = cdic[c] / float(N)
    return V, prior, gcp
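For reference, this is how I read the full pseudocode end to end, as a self-contained sketch under the same assumed document format ({'label': c, 'tokens': [t1, t2, ...]}). It is not tested against my real data, but it keeps the smoothing exactly as in step 10:

from collections import Counter, defaultdict

def train_multinomial_nb(documents):
    # documents: list of {'label': c, 'tokens': [t1, t2, ...]} (assumed format)
    N = float(len(documents))                  # 2: COUNTDOCS(D)
    C = set(d['label'] for d in documents)     # classes recovered from the labels
    V = set()                                  # 1: EXTRACTVOCABULARY(D)
    for d in documents:
        V.update(d['tokens'])
    prior = {}
    condprob = defaultdict(dict)               # condprob[t][c]
    for c in C:
        docs_c = [d for d in documents if d['label'] == c]
        prior[c] = len(docs_c) / N             # 5: Nc / N
        text_c = Counter()                     # 6: CONCATENATETEXTOFALLDOCSINCLASS(D, c)
        for d in docs_c:
            text_c.update(d['tokens'])
        total_c = sum(text_c.values())         # sum over t' of Tct'
        for t in V:
            # 10: (Tct + 1) / (sum_t'(Tct' + 1)) = (Tct + 1) / (total_c + |V|)
            condprob[t][c] = (text_c[t] + 1.0) / (total_c + len(V))
    return V, prior, condprob

One thing I noticed while writing this out: sum_t'(Tct' + 1) works out to the total token count of the class plus |V|, whereas my gcp loop above divides by len(ctoadic[ling]) + tnw[ling], i.e. the number of distinct terms in that class rather than the size of the whole vocabulary. Is that where my version drifts from the pseudocode?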