# NLTK movie-review sentiment classification with a Naive Bayes classifier.
import random

import nltk
from nltk.corpus import movie_reviews

# One (word_list, category) pair per review in the corpus.
documents = [(list(movie_reviews.words(fileid)), category)
             for category in movie_reviews.categories()
             for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)

# Frequency distribution over every (lower-cased) word in the corpus.
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())

# Feature vocabulary: the 3000 most frequent words.
# NOTE: the original used list(all_words.keys())[:3000]; dict keys() is
# insertion order, not frequency order, so most_common() matches the intent.
word_features = [w for w, _count in all_words.most_common(3000)]


def find_features(document):
    """Return a {word: bool} dict marking which feature words occur in document."""
    words = set(document)
    return {w: (w in words) for w in word_features}


print(find_features(movie_reviews.words('neg/cv000_29416.txt')))

featuresets = [(find_features(rev), category) for (rev, category) in documents]

# set that we'll train our classifier with
training_set = featuresets[:1900]
# set that we'll test against.
testing_set = featuresets[1900:]

classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Classifier accuracy percent:",
      (nltk.classify.accuracy(classifier, testing_set)) * 100)
classifier.show_most_informative_features(15)

# ###################### sample output ######################
# Most Informative Features
#     insulting = True        neg : pos = 10.6 : 1.0
#     ludicrous = True        neg : pos = 10.1 : 1.0
#     winslet = True          pos : neg =  9.0 : 1.0
#     detract = True          pos : neg =  8.4 : 1.0
#     breathtaking = True     pos : neg =  8.1 : 1.0
#     silverstone = True      neg : pos =  7.6 : 1.0
#     excruciatingly = True   neg : pos =  7.6 : 1.0
#     warns = True            pos : neg =  7.0 : 1.0
#     tracy = True            pos : neg =  7.0 : 1.0
#     insipid = True          neg : pos =  7.0 : 1.0
#     freddie = True          neg : pos =  7.0 : 1.0
#     damon = True            pos : neg =  5.9 : 1.0
#     debate = True           pos : neg =  5.9 : 1.0
#     ordered = True          pos : neg =  5.8 : 1.0
#     lang = True             pos : neg =  5.7 : 1.0
# ###########################################################
## Save and restore the trained model with pickle
import pickle  # was missing: the original used pickle without importing it

# Persist the trained classifier to disk.
with open("naivebayes.pickle", "wb") as save_classifier:
    pickle.dump(classifier, save_classifier)

# Restore it again from disk.
# NOTE(review): pickle.load can execute arbitrary code — only load trusted files.
with open("naivebayes.pickle", "rb") as classifier_f:
    classifier = pickle.load(classifier_f)
## Ensemble evaluation with a voting classifier built on NLTK's ClassifierI,
## combining NLTK's Naive Bayes classifier with several scikit-learn models.
## Read the corpus and take the 3000 most frequent words, then label each of
## those 3000 words per document by whether it appears in that document,
## using the reviews' pre-existing pos/neg labels as the target categories.
# Ensemble voting over NLTK and scikit-learn classifiers for sentiment analysis.
import pickle
import random
from statistics import mode

import nltk
from nltk.classify import ClassifierI
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.corpus import movie_reviews
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.svm import SVC, LinearSVC, NuSVC


## VoteClassifier inherits from ClassifierI
class VoteClassifier(ClassifierI):
    """Majority-vote ensemble over an arbitrary set of trained classifiers."""

    def __init__(self, *classifiers):
        self._classifiers = classifiers

    def classify(self, features):
        """Return the label chosen by the most classifiers (mode of the votes)."""
        votes = [c.classify(features) for c in self._classifiers]
        return mode(votes)

    def confidence(self, features):
        """Return the fraction of classifiers that voted for the winning label."""
        votes = [c.classify(features) for c in self._classifiers]
        choice_votes = votes.count(mode(votes))
        return choice_votes / len(votes)


documents = [(list(movie_reviews.words(fileid)), category)
             for category in movie_reviews.categories()
             for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)

all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())

## Feature vocabulary: the 3000 most frequent words.
# NOTE: the original used list(all_words.keys())[:3000]; keys() is insertion
# order, not frequency order, so most_common() matches the stated intent.
word_features = [w for w, _count in all_words.most_common(3000)]


## Mark, per document, which feature words are present.
def find_features(document):
    """Return a {word: bool} dict marking which feature words occur in document."""
    words = set(document)
    return {w: (w in words) for w in word_features}


# print(find_features(movie_reviews.words('neg/cv000_29416.txt')))

featuresets = [(find_features(rev), category) for (rev, category) in documents]
training_set = featuresets[:1900]
testing_set = featuresets[1900:]

# classifier = nltk.NaiveBayesClassifier.train(training_set)
# Load the Naive Bayes classifier trained and pickled earlier.
# NOTE(review): pickle.load can execute arbitrary code — only load trusted files.
with open("naivebayes.pickle", "rb") as classifier_f:
    classifier = pickle.load(classifier_f)

print("Original Naive Bayes Algo accuracy percent:",
      (nltk.classify.accuracy(classifier, testing_set)) * 100)
classifier.show_most_informative_features(15)

MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("MNB_classifier accuracy percent:",
      (nltk.classify.accuracy(MNB_classifier, testing_set)) * 100)

BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
print("BernoulliNB_classifier accuracy percent:",
      (nltk.classify.accuracy(BernoulliNB_classifier, testing_set)) * 100)

LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("LogisticRegression_classifier accuracy percent:",
      (nltk.classify.accuracy(LogisticRegression_classifier, testing_set)) * 100)

SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(training_set)
print("SGDClassifier_classifier accuracy percent:",
      (nltk.classify.accuracy(SGDClassifier_classifier, testing_set)) * 100)

# SVC was deliberately excluded from the ensemble in the original tutorial:
# SVC_classifier = SklearnClassifier(SVC())
# SVC_classifier.train(training_set)
# print("SVC_classifier accuracy percent:",
#       (nltk.classify.accuracy(SVC_classifier, testing_set)) * 100)

LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("LinearSVC_classifier accuracy percent:",
      (nltk.classify.accuracy(LinearSVC_classifier, testing_set)) * 100)

NuSVC_classifier = SklearnClassifier(NuSVC())
NuSVC_classifier.train(training_set)
print("NuSVC_classifier accuracy percent:",
      (nltk.classify.accuracy(NuSVC_classifier, testing_set)) * 100)

# Majority-vote ensemble over all seven trained classifiers (odd count, so
# a pos/neg vote can never tie and statistics.mode is always well defined).
voted_classifier = VoteClassifier(classifier,
                                  NuSVC_classifier,
                                  LinearSVC_classifier,
                                  SGDClassifier_classifier,
                                  MNB_classifier,
                                  BernoulliNB_classifier,
                                  LogisticRegression_classifier)
print("voted_classifier accuracy percent:",
      (nltk.classify.accuracy(voted_classifier, testing_set)) * 100)

# Show the ensemble's vote and confidence for the first six test documents
# (one loop instead of the original six copy-pasted print statements).
for features, _category in testing_set[:6]:
    print("Classification:", voted_classifier.classify(features),
          "Confidence %:", voted_classifier.confidence(features) * 100)
# ######################## sample output ########################
# Original Naive Bayes Algo accuracy percent: 66.0
# Most Informative Features
#     thematic = True     pos : neg = 9.1 : 1.0
#     secondly = True     pos : neg = 8.5 : 1.0
#     narrates = True     pos : neg = 7.8 : 1.0
#     layered = True      pos : neg = 7.1 : 1.0
#     rounded = True      pos : neg = 7.1 : 1.0
#     supreme = True      pos : neg = 7.1 : 1.0
#     crappy = True       neg : pos = 6.9 : 1.0
#     uplifting = True    pos : neg = 6.2 : 1.0
#     ugh = True          neg : pos = 5.3 : 1.0
#     gaining = True      pos : neg = 5.1 : 1.0
#     mamet = True        pos : neg = 5.1 : 1.0
#     wanda = True        neg : pos = 4.9 : 1.0
#     onset = True        neg : pos = 4.9 : 1.0
#     fantastic = True    pos : neg = 4.5 : 1.0
#     milos = True        pos : neg = 4.4 : 1.0
# MNB_classifier accuracy percent: 67.0
# BernoulliNB_classifier accuracy percent: 67.0
# LogisticRegression_classifier accuracy percent: 68.0
# SGDClassifier_classifier accuracy percent: 57.99999999999999
# LinearSVC_classifier accuracy percent: 67.0
# NuSVC_classifier accuracy percent: 65.0
# voted_classifier accuracy percent: 65.0
# Classification: neg Confidence %: 100.0
# Classification: pos Confidence %: 57.14285714285714
# Classification: neg Confidence %: 57.14285714285714
# Classification: neg Confidence %: 57.14285714285714
# Classification: pos Confidence %: 57.14285714285714
# Classification: pos Confidence %: 85.71428571428571
# ###############################################################