• 6 Learning to Classify Text


    Learning to Classify Text

    Supervised Classification

    Gender Identification

    import nltk
    import random
    from nltk.corpus import names
    def gender_features(word):
        features = {}
        features['last_letter'] = word[-1]
        return features
    
    names = ([(name, 'male') for name in names.words('male.txt')] +
             [(name, 'female') for name in names.words('female.txt')])
    random.shuffle(names)
    
    featuresets = [(gender_features(n), g) for (n, g) in names]
    print(featuresets)
    train_set,test_set = featuresets[500:],featuresets[:500]
    classifier = nltk.NaiveBayesClassifier.train(train_set)   # naive Bayes
    
    res = classifier.classify(gender_features('Trinity'))
    print(res)
    accuracy_ = nltk.classify.accuracy(classifier,test_set)
    print(accuracy_)   #0.768
    
    # Inspect the classifier to find out which features are most effective
    # at distinguishing the gender of a name.
    classifier.show_most_informative_features(5)
    
    The five most informative features:
    Most Informative Features
                 last_letter = 'a'            female : male   =     39.9 : 1.0
                 last_letter = 'k'              male : female =     31.2 : 1.0
                 last_letter = 'f'              male : female =     15.9 : 1.0
                 last_letter = 'p'              male : female =     11.9 : 1.0
                 last_letter = 'v'              male : female =     11.2 : 1.0
                        
    
    # Choosing the right features
    def gender_features2(word):
        features = {}
        features['first_letter'] = word[0].lower()
        features['last_letter'] = word[-1].lower()
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            features['count(%s)' % letter] = word.lower().count(letter)
            features['has(%s)' %letter] = (letter in word.lower())
        return features
    res = gender_features2('work')
    print(res)
    
    def gender_features3(word):
        features = {}
        features['suffix1'] = word[-1:]
        features['suffix2'] = word[-2:]
        return features
    
    featuresets = [(gender_features(n), g) for (n, g) in names]
    train_set1, test_set1 = featuresets[1500:], featuresets[:500]
    devtest_set1 = featuresets[500:1500]
    print(devtest_set1[0])   # ({'last_letter': 'e'}, 'female')
    
    train_names = names[1500:]
    devtest_names = names[500:1500]
    test_names = names[:500]
    train_set = [(gender_features3(n),g) for (n,g) in train_names]
    test_set = [(gender_features3(n),g) for (n,g) in test_names]
    devtest_set = [(gender_features3(n),g) for (n,g) in devtest_names]
    
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier,test_set))
    
    errors = []
    for (name, tag) in devtest_names:   # collect the names whose gender is guessed wrongly
        # print(name, tag)
        # print(gender_features3(name))
        guess = classifier.classify(gender_features3(name))
        if guess != tag:
            errors.append((tag, guess, name))
    print(errors)
    for (tag, guess, name) in sorted(errors):
        print('correct=%-8s guess=%-8s name=%-30s' % (tag, guess, name))
    print(nltk.classify.accuracy(classifier,test_set))#0.798
    
    # Document classification
    # Classify reviews from the movie reviews corpus as positive or negative.
    from nltk.corpus import movie_reviews
    documents = [(list(movie_reviews.words(fileid)), category)
                 for category in movie_reviews.categories()
                 for fileid in movie_reviews.fileids(category)]
    print('documents',documents)
    random.shuffle(documents)
    
    all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
    word_features = [w for (w, _) in all_words.most_common(2000)]   # the 2000 most frequent words
    def document_features(document):
        # Feature extractor for document classification: one feature per frequent word,
        # recording whether that word occurs in the given document.
        document_words = set(document)
        features = {}
        for word in word_features:
            features['contains(%s)'%word] = (word in document_words)
        return features
    
    print(document_features(movie_reviews.words('pos/cv957_8737.txt'))) #{'contains(plot)': True, 'contains(:)': True, ...}
    
    # Train and test a classifier for document classification
    featuresets = [(document_features(d),c) for (d,c) in documents]
    train_set,test_set = featuresets[100:],featuresets[:100]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    
    print(nltk.classify.accuracy(classifier,test_set))
    classifier.show_most_informative_features(5)
    
    # Part-of-speech tagging
    from nltk.corpus import brown
    suffix_fdist = nltk.FreqDist()
    
    for word in brown.words():   # find the most common suffixes
        word = word.lower()
        suffix_fdist[word[-1:]] += 1   # (in Python 2 this was suffix_fdist.inc(word[-1:]))
        suffix_fdist[word[-2:]] += 1
        suffix_fdist[word[-3:]] += 1
    common_suffixes = suffix_fdist.most_common(100)   # the 100 most common (suffix, count) pairs
    
    def pos_features(word):
        # Feature extractor that checks which of the common suffixes a given word ends with.
        features = {}
        for (suffix, count) in common_suffixes:
            features['endswith(%s)' % suffix] = word.lower().endswith(suffix)
        return features
    
    tagged_words = brown.tagged_words(categories = 'news')
    featuresets = [(pos_features(n),g) for (n,g) in tagged_words]
    # print(featuresets)   # e.g. ({'endswith(ns)': False, 'endswith(ith)': False, ...}, 'AT')
    size = int(len(featuresets) * 0.1)
    train_set,test_set = featuresets[size:],featuresets[:size]
    classifier = nltk.DecisionTreeClassifier.train(train_set)   # decision tree classifier
    print(nltk.classify.accuracy(classifier,test_set))
    
    
    # Exploring context
    # A part-of-speech classifier whose feature detector examines the context a word appears
    # in when deciding which tag to assign; in particular, the previous word is included as
    # a feature.
    def pos_features(sentence,i):
        features  = {'suffix(1)':sentence[i][-1:],
                     'suffix(2)':sentence[i][-2:],
                     'suffix(3)':sentence[i][-3:],
                     }
        if i == 0:
            features["prev-word"] = "<START>"
        else:
            features["prev-word"] = sentence[i-1]
        return features
    
    print(brown.sents()[0])#['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', 'Friday', 'an', 'investigation', 'of', "Atlanta's", 'recent', 'primary', 'election', 'produced', '``', 'no', 'evidence', "''", 'that', 'any', 'irregularities', 'took', 'place', '.']
    res14 = pos_features(brown.sents()[0],2)
    print(res14)   # {'suffix(1)': 'y', 'suffix(2)': 'ty', 'suffix(3)': 'nty', 'prev-word': 'Fulton'}
    tagged_sents = brown.tagged_sents(categories = 'news')
    print(tagged_sents)#[[('The', 'AT'), ('Fulton', 'NP-TL'), ('County', 'NN-TL'), ('Grand', 'JJ-TL'),...]
    featuresets = []
    for tagged_sent in tagged_sents:
        untagged_sent = nltk.tag.untag(tagged_sent)
        # print(untagged_sent)   # ['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', 'Friday', ...]
        for i, (word, tag) in enumerate(tagged_sent):
            featuresets.append((pos_features(untagged_sent, i), tag))
    # e.g. ({'suffix(1)': 's', 'suffix(2)': 'ts', 'suffix(3)': 'nts', 'prev-word': '<START>'}, 'NNS')
    size = int(len(featuresets) * 0.1)
    train_set,test_set = featuresets[size:],featuresets[:size]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    accuracy_ = nltk.classify.accuracy(classifier,test_set)
    print(accuracy_)#0.740029835902536
    
    Sequence classification
    # Find the best label for the first input, then use it to help find the best label for the
    # next input, and repeat until every input has been labelled. To make this possible we pass
    # an extra history argument, which extends the features with the tags predicted so far.
    def pos_features(sentence,i,history):
        features = {'suffix(1)': sentence[i][-1:],
                    'suffix(2)': sentence[i][-2:],
                    'suffix(3)': sentence[i][-3:],
                    }
        if i == 0:
            features["prev-word"] = "<START>"
            features["prev-tag"] = "<START>"
        else:
            features["prev-word"] = sentence[i - 1]
            features["prev-tag"] = history[i - 1]
        return features
    
    class ConsecutivePosTagger(nltk.TaggerI):
        def __init__(self,train_sents):
            train_set = []
            for tagged_sent in train_sents:
                untagged_sent = nltk.tag.untag(tagged_sent)
                history = []
                for i, (word,tag) in enumerate(tagged_sent):
                    featureset = pos_features(untagged_sent,i,history)
                    train_set.append((featureset,tag))
                    history.append(tag)
            self.classifier = nltk.NaiveBayesClassifier.train(train_set)  # train once, on all sentences
    
        def tag(self, sentence):
            history = []
            for i, word in enumerate(sentence):
                featureset = pos_features(sentence,i,history)
                tag = self.classifier.classify(featureset)
                history.append(tag)
            return list(zip(sentence, history))
    
        
    tagged_sents = brown.tagged_sents(categories = 'news')
    size = int(len(tagged_sents) * 0.1)
    train_sents,test_sents = tagged_sents[size:],tagged_sents[:size]
    tagger = ConsecutivePosTagger(train_sents)
    print(tagger.evaluate(test_sents))
    
    # Other sequence classification methods
    # The drawback of this approach is that once a decision has been made there is no way to
    # change it later: if we tag a word as a noun, say, and later evidence shows it should have
    # been a verb, there is no way to go back and repair the mistake.
    

    Further Examples of Supervised Classification

    # Sentence segmentation
    sents = nltk.corpus.treebank_raw.sents()
    tokens = []
    boundaries = set()
    offset = 0
    for sent in sents:
        tokens.extend(sent)             # merged list of the tokens from every sentence
        offset += len(sent)
        boundaries.add(offset - 1)      # set of the indexes of all sentence-boundary tokens
        
    print("tokens",tokens)#['.', 'START', 'Pierre', 'Vinken', ',', '61', 'years', 'old',...]
    print("offset",offset)#offset 101797
    print("boundaries",boundaries)#{1, 90116, 16389, 40968, 81929, 24587,}
    
    def punct_features(tokens,i):
        return {
            'next-word-capitalized':tokens[i+1][0].isupper(),
            'prevword':tokens[i-1].lower(),
            'punc':tokens[i],
            'prev-word-is-one-char':len(tokens[i-1]) == 1
        }
        
    featuresets = [(punct_features(tokens, i), (i in boundaries))
                   for i in range(1, len(tokens) - 1)
                   if tokens[i] in '.?!']
    
    print("featuresets",featuresets)#[({'next-word-capitalized': False, 'prevword': 'nov', 'punc': '.', 'prev-word-is-one-char': False}, False), ...]
    
    size = int(len(featuresets) * 0.1)
    train_set,test_set = featuresets[size:],featuresets[:size]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    res17 = nltk.classify.accuracy(classifier,test_set)
    print(res17)#0.936026936026936
    
    # Classification-based sentence segmenter: check each punctuation token, ask the classifier
    # whether it marks a sentence boundary, and split the word list at the boundaries it finds.
    def segment_sentences(words):
        start = 0
        sents = []
        for i, word in enumerate(words):
            if word in '.?!' and classifier.classify(punct_features(words, i)) == True:
                sents.append(words[start:i+1])
                start = i + 1
        if start < len(words):
            sents.append(words[start:])
        return sents
            
    # Identifying dialogue act types
    # Build a classifier on the NPS Chat corpus that identifies the dialogue act type of new
    # instant-messaging posts.
    posts = nltk.corpus.nps_chat.xml_posts()[:10000]   # the XML annotation of each post
    # print("posts", posts)   # [<Element 'Post' at 0x...>, <Element 'Post' at 0x...>, ...]
    
    # A simple feature extractor that checks which words a post contains.
    def dialogue_act_features(post):
        features = {}
        for word in nltk.word_tokenize(post):
            features['contains(%s)' %word.lower()] = True
        return features
        
    featuresets = [(dialogue_act_features(post.text), post.get('class')) for post in posts]
    print("featuresets",featuresets) #({'contains(part)': True}, 'System'), ({'contains(part)': True}, 'System'), ({'contains(sup)': True, 'contains(yoll)': True}, 'Greet'),...}
    
    size = int(len(featuresets) * 0.1)
    train_set,test_set = featuresets[size:],featuresets[:size]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier,test_set))#0.668
    
    # Recognizing textual entailment (RTE) is the task of deciding whether a given piece of
    # text T entails another text, called the "hypothesis".
    # If the hypothesis H can be inferred from the premise P, we say that P entails H (P -> H).
    def rte_features(rtepair):
        extractor = nltk.RTEFeatureExtractor(rtepair)
        features = {}
        features['word_overlap'] = len(extractor.overlap('word'))
        features['word_hyp_extra'] = len(extractor.hyp_extra('word'))
        features['ne_overlap'] = len(extractor.overlap('ne'))
        features['ne_hyp_extra'] = len(extractor.hyp_extra('ne'))
        return features
        
    rtepair = nltk.corpus.rte.pairs(['rte3_dev.xml'])[33]
    extractor = nltk.RTEFeatureExtractor(rtepair)
    print(extractor.text_words)#{'Davudi', 'Co', 'terrorism.', 'Organisation', 'fledgling', 'at', 'SCO', 'that', 'central', 'Soviet', 'operation', 'was', 'fight', 'Iran', 'China', 'meeting', 'Parviz', 'binds', 'republics', 'former', 'Asia', 'Shanghai', 'together', 'representing', 'Russia', 'association', 'four'}
    print(extractor.hyp_extra('word'))#{'member'}
    

    Evaluation

    The test set
    Accuracy
    Precision and recall
    Confusion matrices
    Cross-validation
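
    Each of these measures can be computed with NLTK's built-in helpers. The following is a
    minimal sketch, not from the original post: it rebuilds the gender data from earlier and
    then illustrates accuracy, precision and recall for one label, a confusion matrix, and a
    simple cross-validation loop. The refsets/testsets names and the 5-fold split are
    illustrative choices only.

    import collections
    import random
    import nltk
    from nltk.corpus import names
    from nltk.metrics import precision, recall

    # Rebuild the gender data set used earlier in the post.
    labeled = ([(n, 'male') for n in names.words('male.txt')] +
               [(n, 'female') for n in names.words('female.txt')])
    random.shuffle(labeled)
    featuresets = [({'last_letter': n[-1]}, g) for (n, g) in labeled]
    train_set, test_set = featuresets[500:], featuresets[:500]
    classifier = nltk.NaiveBayesClassifier.train(train_set)

    # Accuracy: the fraction of test instances that are labelled correctly.
    print(nltk.classify.accuracy(classifier, test_set))

    # Precision and recall for one label: compare reference sets with guess sets.
    refsets = collections.defaultdict(set)
    testsets = collections.defaultdict(set)
    for i, (feats, label) in enumerate(test_set):
        refsets[label].add(i)
        testsets[classifier.classify(feats)].add(i)
    print(precision(refsets['male'], testsets['male']))
    print(recall(refsets['male'], testsets['male']))

    # Confusion matrix: reference labels down the side, classifier guesses across the top.
    ref = [label for (feats, label) in test_set]
    guesses = [classifier.classify(feats) for (feats, label) in test_set]
    print(nltk.ConfusionMatrix(ref, guesses))

    # Simple 5-fold cross-validation: rotate which slice of the data is held out for testing.
    fold_size = len(featuresets) // 5
    for k in range(5):
        test_fold = featuresets[k * fold_size:(k + 1) * fold_size]
        train_fold = featuresets[:k * fold_size] + featuresets[(k + 1) * fold_size:]
        fold_clf = nltk.NaiveBayesClassifier.train(train_fold)
        print(k, nltk.classify.accuracy(fold_clf, test_fold))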
    

    Decision Trees

    # Compute the entropy of a list of labels
    import math
    def entropy(labels):
        freqdist = nltk.FreqDist(labels)
        probs = [freqdist.freq(l) for l in freqdist]
        print(probs)   # [0.75, 0.25]
        return -sum([p * math.log(p, 2) for p in probs])
    
    print(entropy(['male','female','male','male']))#0.8112781244591328
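
    A decision tree uses this entropy to pick its decision stumps: at each node it chooses the
    feature with the highest information gain, i.e. the drop in label entropy after splitting on
    that feature. A small sketch of the calculation follows; the information_gain helper and the
    toy data are illustrative only, not part of the original post or of NLTK's API.

    def information_gain(data, feature_name):
        # Entropy of all the labels, minus the size-weighted entropy of the labels
        # within each group of items that share the same value for feature_name.
        labels = [label for (features, label) in data]
        gain = entropy(labels)
        values = set(features[feature_name] for (features, label) in data)
        for value in values:
            subset = [label for (features, label) in data if features[feature_name] == value]
            gain -= len(subset) / len(data) * entropy(subset)
        return gain

    # Toy data: last_letter separates the labels perfectly, first_letter not at all.
    data = [({'last_letter': 'a', 'first_letter': 'j'}, 'female'),
            ({'last_letter': 'a', 'first_letter': 'm'}, 'female'),
            ({'last_letter': 'k', 'first_letter': 'j'}, 'male'),
            ({'last_letter': 'k', 'first_letter': 'm'}, 'male')]
    print(information_gain(data, 'last_letter'))    # 1.0
    print(information_gain(data, 'first_letter'))   # 0.0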
    

    Naive Bayes Classifiers

    The underlying probabilistic model
    Zero counts and smoothing
    Non-binary features
    The naivety of independence
    The cause of double-counting
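
    A minimal hand-rolled sketch of the underlying model: P(label | features) is proportional to
    P(label) times the product of P(feature value | label) over the features, and add-k smoothing
    keeps unseen feature values (the zero counts) from forcing a zero probability. The toy
    training data and the naive_bayes helper below are illustrative only; NLTK's
    NaiveBayesClassifier does all of this internally.

    from collections import Counter, defaultdict

    # Toy training data: (features, label) pairs, as in the gender example.
    train = [({'last_letter': 'a'}, 'female'),
             ({'last_letter': 'a'}, 'female'),
             ({'last_letter': 'e'}, 'female'),
             ({'last_letter': 'k'}, 'male'),
             ({'last_letter': 'e'}, 'male')]

    label_counts = Counter(label for (features, label) in train)
    value_counts = defaultdict(Counter)     # (label, feature name) -> counts of feature values
    for features, label in train:
        for fname, fval in features.items():
            value_counts[label, fname][fval] += 1
    vocab = len(set(v for counter in value_counts.values() for v in counter))

    def naive_bayes(features, k=0.5):
        # P(label | features) proportional to P(label) * product of P(value | label),
        # using add-k smoothing for values never seen with a given label.
        scores = {}
        for label, lcount in label_counts.items():
            score = lcount / sum(label_counts.values())              # P(label)
            for fname, fval in features.items():
                counts = value_counts[label, fname]
                score *= (counts[fval] + k) / (lcount + k * vocab)   # smoothed P(value | label)
            scores[label] = score
        total = sum(scores.values())
        return {label: s / total for label, s in scores.items()}     # normalized

    print(naive_bayes({'last_letter': 'a'}))   # 'female' gets the higher probability
    print(naive_bayes({'last_letter': 'z'}))   # unseen value: smoothing keeps both labels > 0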
    

    Maximum Entropy Classifiers

    The maximum entropy model
    Maximizing entropy
    Generative classifiers (like a topographic map) vs. conditional classifiers (like a picture of the horizon)
    e.g. the naive Bayes classifier is generative, while the maximum entropy classifier is conditional
    1. A generative classifier builds a model of P(input, label), the joint probability of (input, label) pairs.
    2. A conditional classifier builds a model of P(label|input), the probability of a label given an input.
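
    In NLTK the conditional model can be trained directly with nltk.MaxentClassifier. Below is a
    sketch, not from the original post, that trains both kinds of model on the gender data from
    earlier; the iteration count is kept small only so the example finishes quickly, and the
    IIS/GIS trainers assume numpy is installed.

    import random
    import nltk
    from nltk.corpus import names

    # Same gender data and last-letter feature as earlier in the post.
    labeled = ([(n, 'male') for n in names.words('male.txt')] +
               [(n, 'female') for n in names.words('female.txt')])
    random.shuffle(labeled)
    featuresets = [({'last_letter': n[-1]}, g) for (n, g) in labeled]
    train_set, test_set = featuresets[500:], featuresets[:500]

    # Generative model of P(input, label): naive Bayes.
    nb = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(nb, test_set))

    # Conditional model of P(label | input): maximum entropy, trained with IIS for a few
    # iterations only so the sketch runs quickly (more iterations fit the model better).
    me = nltk.MaxentClassifier.train(train_set, algorithm='iis', trace=0, max_iter=5)
    print(nltk.classify.accuracy(me, test_set))
    print(me.prob_classify({'last_letter': 'a'}).prob('female'))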
    

    Modelling Linguistic Patterns

  • Original post: https://www.cnblogs.com/nxf-rabbit75/p/9534316.html