Chinese Word Frequency Statistics
1. Download a full-length Chinese novel.
Crescent Moon (《月牙儿》) by Lao She (老舍)
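For example, the text can be fetched and saved as zh.txt; a minimal sketch, assuming the novel is hosted at a hypothetical URL:

import requests

# The URL below is a placeholder; substitute the actual source of the novel.
url = 'https://example.com/yueyar.txt'
resp = requests.get(url)
resp.encoding = 'UTF-8'   # make sure the Chinese text decodes correctly
with open('zh.txt', 'w', encoding='UTF-8') as f:
    f.write(resp.text)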
2. Read the text to be analyzed from a file.
text = open('zh.txt',encoding='UTF-8').read()
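The one-liner above never closes the file handle; an equivalent pattern that closes it automatically:

with open('zh.txt', encoding='UTF-8') as f:
    text = f.read()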
3. Install and use jieba for Chinese word segmentation.
pip install jieba
import jieba
words = jieba.lcut(text)   # lcut segments the text and returns the tokens as a list
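A quick check on a short sentence shows what the segmenter produces (the exact split may vary with jieba's dictionary version):

print(jieba.lcut('我给妈妈看月牙儿'))
# something like ['我', '给', '妈妈', '看', '月牙', '儿'] before the custom words are added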
4. Update the dictionary, adding vocabulary specific to the text being analyzed.
jieba.add_word('我')
jieba.add_word('妈妈')
jieba.add_word('月牙儿')
words = list(jieba.cut(text))
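After add_word, '月牙儿' should survive as a single token; a quick sanity check:

print(jieba.lcut('月牙儿又上来了'))
# '月牙儿' is now expected to appear as one token in the output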
5. Generate the word frequency statistics
dele = {'。','!','?','的','“','”','(',')',' ','》','《',','}
wordDict = {}
wordSet = set(words) - dele   # drop punctuation and other non-semantic symbols
for w in wordSet:
    if len(w) > 1:
        wordDict[w] = words.count(w)
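Calling words.count inside the loop rescans the whole token list once per distinct word; an equivalent and much faster sketch using collections.Counter:

from collections import Counter

counts = Counter(w for w in words if len(w) > 1 and w not in dele)
wordDict = dict(counts)   # same result as the loop above, in a single pass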
6. Sort by frequency
wordlist = sorted(wordDict.items(), key=lambda x: x[1], reverse=True)
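A quick spot-check of the ranking:

for w, n in wordlist[:5]:
    print(w, n)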
7. Exclude grammatical words: stopwords such as pronouns, articles, and conjunctions.
# The stopword file name below is an assumption; use whatever stopword list you have.
stops = set(open('stops_chinese.txt', encoding='UTF-8').read().split())
tokens = [token for token in words if token not in stops]
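With the stopwords filtered out, the frequency table from step 5 can be rebuilt over tokens rather than words; a minimal sketch:

wordDict = {}
for w in set(tokens) - dele:
    if len(w) > 1:
        wordDict[w] = tokens.count(w)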
8. Output the top 20 words by frequency and save the results to a file
The complete program, with the imports it needs:

import jieba
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud

text = open('zh.txt', encoding='UTF-8').read()

# Update the dictionary with custom words
jieba.add_word('我')
jieba.add_word('妈妈')
jieba.add_word('月牙儿')
words = list(jieba.cut(text))

# Build the frequency table, excluding grammatical words (pronouns,
# particles, etc.) and punctuation treated here as stopwords
dele = {'。','!','?','的','呢','哦','“','”','(',')',' ','》','《',','}
wordDict = {}
wordSet = set(words) - dele
for w in wordSet:
    if len(w) > 1:
        wordDict[w] = words.count(w)

# Sort by frequency, descending
wordlist = sorted(wordDict.items(), key=lambda x: x[1], reverse=True)

# Generate the word cloud
cut = " ".join(words)
mywc = WordCloud().generate(cut)
plt.imshow(mywc)
plt.axis("off")
plt.show()   # display the word cloud

# Print the 20 most frequent words
for i in range(20):
    print(wordlist[i])

# Save the results to a file
pd.DataFrame(data=wordlist).to_csv('text.csv', encoding='UTF-8')
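One caveat: WordCloud's bundled default font cannot render CJK characters, so a Chinese word cloud may come out as empty boxes. A sketch pointing it at a Chinese font (the font path is an assumption; adjust it for your system):

# simhei.ttf ships with Windows; on other systems substitute any CJK font file.
mywc = WordCloud(font_path='C:/Windows/Fonts/simhei.ttf').generate(cut)
plt.imshow(mywc)
plt.axis("off")
plt.show()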