This is a tri_gram tagger backed off by a bi-gram tagger (which in turn is backed off by a uni-gram tagger), and the final backoff tagger is a regular-expression tagger. So if every other tagger fails to tag a token according to the rules defined here, that last token is left to the regex tagger. Hopefully this helps you build your own rule-based regex tagger.
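To see what the backoff chain buys you, here is a minimal sketch (my own illustration, not part of the script below; it assumes NLTK is installed and the Brown corpus has been downloaded) that trains the same trigram -> bigram -> unigram -> regex chain on part of the Brown news sentences and compares its accuracy on held-out sentences against a trigram tagger with no backoff:

import nltk
from nltk.corpus import brown

# Train/held-out split of the Brown news sentences (split sizes chosen arbitrarily).
train = brown.tagged_sents(categories='news')[:3000]
test = brown.tagged_sents(categories='news')[3000:3500]

# Regex tagger as the last resort; anything unmatched defaults to 'NN'.
regex = nltk.RegexpTagger([(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*', 'NN')])

# Backoff chain: trigram -> bigram -> unigram -> regex.
uni = nltk.UnigramTagger(train, backoff=regex)
bi = nltk.BigramTagger(train, backoff=uni)
tri = nltk.TrigramTagger(train, backoff=bi)

# Trigram tagger alone, for comparison: unseen contexts come back as None.
tri_alone = nltk.TrigramTagger(train)

print(tri.accuracy(test))        # use tri.evaluate(test) on NLTK versions before 3.6
print(tri_alone.accuracy(test))

The chained tagger scores noticeably higher, because the lower-order taggers and the regex rules catch the many trigram contexts that never occurred in the training data.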
from nltk.corpus import brown
import sys
from nltk.tokenize import word_tokenize
import nltk
from nltk import ne_chunk
def tri_gram():
    ## Trigram tagger trained on tagged sentences from the Brown corpus (news)
    b_t_sents = brown.tagged_sents(categories='news')
    ## Regular-expression tagger used as the final backoff
    default_tagger = nltk.RegexpTagger(
        [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),  # cardinal numbers
         (r'(The|the|A|a|An|an)$', 'AT'),   # articles
         (r'.*able$', 'JJ'),                # adjectives
         (r'.*ness$', 'NN'),                # nouns formed from adjectives
         (r'.*ly$', 'RB'),                  # adverbs
         (r'.*s$', 'NNS'),                  # plural nouns
         (r'.*ing$', 'VBG'),                # gerunds
         (r'.*ed$', 'VBD'),                 # past tense verbs
         (r'.*', 'NN')                      # nouns (default)
         ])
    ## n-gram taggers chained via backoff: trigram -> bigram -> unigram -> regex
    u_gram_tag = nltk.UnigramTagger(b_t_sents, backoff=default_tagger)
    b_gram_tag = nltk.BigramTagger(b_t_sents, backoff=u_gram_tag)
    t_gram_tag = nltk.TrigramTagger(b_t_sents, backoff=b_gram_tag)
    ## POS-tag and chunk the text from the file given on the command line
    with open(sys.argv[1], 'r') as f_read:
        given_text = f_read.read()
    segmented_lines = nltk.sent_tokenize(given_text)
    for text in segmented_lines:
        words = word_tokenize(text)
        sent = t_gram_tag.tag(words)
        print(ne_chunk(sent))
tri_gram()
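To run the script, pass the path of a plain-text file as the only command-line argument, for example python3 tagger.py article.txt (both file names here are placeholders). Each sentence in the file is split off with sent_tokenize, tokenized into words, tagged by the trigram backoff chain, and printed as an ne_chunk named-entity tree.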