我正在训练 CRF 来做一些标记工作。原始输入有大约 180 万字,我正在尝试不同的标签集和特征以获得最佳结果。但是,现在的训练过程很长,调整参数真的很痛苦。
我想知道不同训练算法之间的效率有什么区别:lbfgs、l2sgd、ap、pa 和 arow。训练模型的快速算法是什么?目前我正在使用 lbfgs。
另外我很好奇如果我购买更好的 CPU 或 RAM,训练会更快吗?我正在使用 I7-9700k 和 32GB RAM 进行训练。sklearn 是否受益于超线程?如果是,我可以购买新的 9900k,因为 9700k 不支持超线程。
#bigram,window size 5
# Bigram feature template, window size 5 (positions -2 .. +2).
def word2features_2_5(wordlist, i):
    """Build the CRF feature dict for token *i* of *wordlist*.

    Each element of *wordlist* is a (character, label) pair; only the
    character at index 0 is read here.  Emits the current character,
    the unigrams inside a 5-token window, and the adjacent bigrams.
    Window features are skipped near the sequence boundaries.
    """
    n = len(wordlist)
    cur = wordlist[i][0]
    features = {'bias': 1.0, 'word': cur}
    if i > 0:
        prev1 = wordlist[i - 1][0]
        features['word[-1]'] = prev1
        features['word[-1,0]'] = prev1 + cur
    if i > 1:
        prev2 = wordlist[i - 2][0]
        features['word[-2]'] = prev2
        features['word[-2,-1]'] = prev2 + wordlist[i - 1][0]
    if i < n - 1:
        next1 = wordlist[i + 1][0]
        features['word[1]'] = next1
        features['word[0,1]'] = cur + next1
    if i < n - 2:
        next2 = wordlist[i + 2][0]
        features['word[2]'] = next2
        features['word[1,2]'] = wordlist[i + 1][0] + next2
    return features
def get_labels(wordlist):
    """Return the label sequence: element [1] of every (char, label) pair."""
    return [item[1] for item in wordlist]
def get_features(wordlist, number):
    """Extract one feature dict per token for the whole sequence.

    *number* selects the feature template: 13, 15 and 25 dispatch to
    the matching ``word2features_*`` builder; any other value produces
    an empty feature list for every position (original fall-through
    behaviour is kept).
    """
    x = []
    for i, _ in enumerate(wordlist):
        if number == 13:
            feats = word2features_1_3(wordlist, i)
        elif number == 15:
            feats = word2features_1_5(wordlist, i)
        elif number == 25:
            feats = word2features_2_5(wordlist, i)
        else:
            feats = []
        x.append(feats)
    return x
# ---- 4-fold split of the training corpus -----------------------------------
# NOTE(review): each contiguous quarter of the corpus is fed to
# sklearn-crfsuite as ONE long sequence, so RandomizedSearchCV(cv=4) below
# sees exactly 4 "samples" and performs leave-one-quarter-out validation.
length = len(wordlist_6)  # wordlist_6 is defined elsewhere in this file
total_x = get_features(wordlist_6, 25)  # template 25 = bigram, window size 5
fold_1_x = total_x[0:length//4]
fold_2_x = total_x[length//4:length//2]
fold_3_x = total_x[length//2:length//4 * 3]
fold_4_x = total_x[length//4 * 3:length]
total_y = get_labels(wordlist_6)
fold_1_y = total_y[0:length//4]
fold_2_y = total_y[length//4:length//2]
fold_3_y = total_y[length//2:length//4 * 3]
fold_4_y = total_y[length//4 * 3:length]
train_x = [fold_1_x, fold_2_x, fold_3_x, fold_4_x]
train_y = [fold_1_y, fold_2_y, fold_3_y, fold_4_y]

# CRF trained with L-BFGS; the c1/c2 regularisation weights are tuned below.
crf = sklearn_crfsuite.CRF(
    algorithm='lbfgs',
    all_possible_transitions=True,
    all_possible_states=True,
)
labels = ['B1', 'B2', 'B3', 'E', 'S', 'M']
# Weighted flat F1 over the six tags is the model-selection criterion.
f1_scorer = make_scorer(metrics.flat_f1_score,
                        average='weighted', labels=labels)
params_space = {
    'c1': scipy.stats.uniform(0.01, 4.99),  # c1 ~ U(0.01, 5.00)
    'c2': scipy.stats.uniform(0.01, 4.99),  # c2 ~ U(0.01, 5.00)
}
# 20 random (c1, c2) draws x 4 folds = 80 full CRF fits; n_jobs=-1 runs the
# fits in parallel processes (one per core).
rs = RandomizedSearchCV(crf, params_space,
                        cv=4,
                        verbose=1,
                        n_jobs=-1,
                        n_iter=20,
                        scoring=f1_scorer)
rs.fit(train_x, train_y)

# Train-set performance: the whole corpus is predicted as a single sequence.
# accuracy_5 is defined elsewhere in this file — presumably per-token accuracy.
train_x = [total_x]
train_y = [total_y]
y_pred = rs.predict(train_x)
# BUGFIX: corrected the typo "accuray" -> "accuracy" in both report strings.
print("accuracy of experiment 6(dev) is: " + str(accuracy_5(y_pred,
                                                            train_y)))
# Held-out test-set performance on the gold corpus.
test_x = [get_features(goldlist_6, 25)]
test_y = [get_labels(goldlist_6)]
y_pred = rs.predict(test_x)
print("accuracy of experiment 6(test) is: " + str(accuracy_5(y_pred, test_y)))
上面的代码花了我 422 分钟。我有 6 个标签和 10 个特征。我仍然想继续使用更多特征和不同的标签集进行实验。如何加快训练速度?