I'm trying to morphologically analyze a whole folder of txt files, using foma (https://code.google.com/archive/p/foma/).

Below is the code I wrote. I pass each word to the foma FST from Python, but after processing 143 of the 1900 files, the loop hangs indefinitely. As a sanity check, I commented out the foma_apply_up call inside the loop (see the commented-out lines below), and every file was written to the new folder (in non-root, unlemmatized form) without any problem.
import os
import codecs

import nltk
import foma  # ctypes bindings around libfoma


class Lemmatizer:
    def __init__(self, inputFolderPath=None, outputFolderPath=None, fomaBinFilePath="Konkani.bin"):
        self.inputFolderPath = inputFolderPath
        self.outputFolderPath = outputFolderPath
        self.fomaBinFilePath = fomaBinFilePath
        # Load the compiled FST and create an apply handle
        self.net = foma.foma_fsm_read_binary_file(fomaBinFilePath)
        self.ah = foma.foma_apply_init(self.net)

    def lemmatize_folder(self):
        # This loads a second copy of the network and a second apply
        # handle, shadowing the ones created in __init__
        net = foma.foma_fsm_read_binary_file(self.fomaBinFilePath)
        ah = foma.foma_apply_init(net)
        if not os.path.exists(self.outputFolderPath):
            os.makedirs(self.outputFolderPath)
        for root, dirs, files in os.walk(self.inputFolderPath):
            for file in filter(lambda file: file.endswith('.txt'), files):
                with codecs.open(os.path.join(self.outputFolderPath, file), 'w') as outputFile:
                    with codecs.open(os.path.join(root, file), 'r', 'utf-8') as inputFile:
                        for line in inputFile:
                            for word in nltk.word_tokenize(line):
                                # Analyze the word; None means no analysis was found
                                result = foma.foma_apply_up(ah, word)
                                # result = None  # <- with this in place of the call above, all files are written fine
                                if result is not None:
                                    print file
                                    print result.split('+', 1)[0]  # lemma is the part before the first '+'
                                    # outputFile.write(result.split('+', 1)[0])
                                else:
                                    outputFile.write(word.encode('utf-8'))
                                outputFile.write(' ')
                            outputFile.write('\n')
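For context, this is roughly how I drive the class (the folder names here are placeholders, not my real paths):

    lemmatizer = Lemmatizer(inputFolderPath='corpus',
                            outputFolderPath='lemmatized',
                            fomaBinFilePath='Konkani.bin')
    lemmatizer.lemmatize_folder()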
Has anyone run into a similar problem? Does the foma FST have some limit on how many times it can be called after initialization?
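One workaround I'm considering, sketched below, is to periodically tear down and re-create the apply handle, in case the handle accumulates state across calls. This is only a sketch: it assumes my ctypes wrapper also exposes foma_apply_clear (libfoma's C API does have apply_clear), and RESET_EVERY is a made-up threshold.

    RESET_EVERY = 10000  # hypothetical threshold; would need tuning

    class ResettingAnalyzer(object):
        """Re-creates the foma apply handle every RESET_EVERY calls."""

        def __init__(self, binFilePath):
            self.net = foma.foma_fsm_read_binary_file(binFilePath)
            self.ah = foma.foma_apply_init(self.net)
            self.calls = 0

        def apply_up(self, word):
            result = foma.foma_apply_up(self.ah, word)
            self.calls += 1
            if self.calls % RESET_EVERY == 0:
                # Release the handle's internal buffers and start fresh;
                # assumes the wrapper exposes libfoma's apply_clear
                foma.foma_apply_clear(self.ah)
                self.ah = foma.foma_apply_init(self.net)
            return result

The inner loop would then call analyzer.apply_up(word) instead of foma.foma_apply_up(ah, word) directly. Would this kind of reset help, or is the hang caused by something else entirely?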