• Chinese Word Frequency Statistics and Word Cloud Generation


    This assignment comes from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/2822

    Chinese Word Frequency Statistics

    1. Download a full-length Chinese novel.

    2. Read the text to be analyzed from a file, as sketched below.
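    A minimal sketch of this step (the filename Ctxt.txt matches the one written by the scraping script in step 3; reading it this way is an assumption, not part of the original post):

    # Read the whole novel into one string for later segmentation
    with open('Ctxt.txt', 'r', encoding='utf-8') as f:
        text = f.read()
    print(len(text))  # quick check that the file was read

    The longer script below is really a helper for step 4: it converts Sogou .scel cell dictionaries into plain-text word lists, which can later be loaded as jieba's custom dictionary.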

    # -*- coding: utf-8 -*-
    import struct
    import os
     
    # Offset of the pinyin table
    startPy = 0x1540

    # Offset of the Chinese phrase table
    startChinese = 0x2628

    # Global pinyin table: index -> pinyin syllable
    GPy_Table = {}

    # Parse result:
    # a list of (frequency, pinyin, Chinese phrase) tuples
     
     
    # Convert raw bytes to a string; .scel stores text as UTF-16LE,
    # so decode one little-endian uint16 ('<H') at a time
    def byte2str(data):
        pos = 0
        s = ''
        while pos < len(data):
            c = chr(struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0])
            if c != chr(0):
                s += c
            pos += 2
        return s
     
    # Parse the pinyin table into GPy_Table
    def getPyTable(data):
        data = data[4:]
        pos = 0
        while pos < len(data):
            index = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]
            pos += 2
            lenPy = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]
            pos += 2
            py = byte2str(data[pos:pos + lenPy])

            GPy_Table[index] = py
            pos += lenPy
     
    # Get the pinyin of one phrase from its index table
    def getWordPy(data):
        pos = 0
        ret = ''
        while pos < len(data):
            index = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]
            ret += GPy_Table[index]
            pos += 2
        return ret
     
    # Parse the Chinese phrase table
    def getChinese(data):
        GTable = []
        pos = 0
        while pos < len(data):
            # Number of homophones (phrases sharing this pinyin)
            same = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]

            # Length of the pinyin index table
            pos += 2
            py_table_len = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]

            # Pinyin index table
            pos += 2
            py = getWordPy(data[pos: pos + py_table_len])

            # Chinese phrases
            pos += py_table_len
            for i in range(same):
                # Length of the phrase
                c_len = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]
                # The phrase itself
                pos += 2
                word = byte2str(data[pos: pos + c_len])
                # Length of the extension block
                pos += c_len
                ext_len = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]
                # Frequency (first field of the extension block)
                pos += 2
                count = struct.unpack('<H', bytes([data[pos], data[pos + 1]]))[0]

                # Save the result
                GTable.append((count, py, word))

                # Skip to the next phrase
                pos += ext_len
        return GTable
     
     
    def scel2txt(file_name):
        print('-' * 60)
        with open(file_name, 'rb') as f:
            data = f.read()

        print("Dictionary name:", byte2str(data[0x130:0x338]))
        print("Dictionary type:", byte2str(data[0x338:0x540]))
        print("Description:", byte2str(data[0x540:0xd40]))
        print("Examples:", byte2str(data[0xd40:startPy]))

        getPyTable(data[startPy:startChinese])
        return getChinese(data[startChinese:])
     
    if __name__ == '__main__':
        # Folder containing the .scel files (change to your own path)
        in_path = r"F:\text"
        # Folder for the converted word lists (change to your own path)
        out_path = r"F:\text"
        fin = [fname for fname in os.listdir(in_path) if fname.endswith(".scel")]
        for f in fin:
            try:
                for word in scel2txt(os.path.join(in_path, f)):
                    file_path = os.path.join(out_path, str(f).split('.')[0] + '.txt')
                    # Append one phrase per line
                    with open(file_path, 'a+', encoding='utf-8') as file:
                        file.write(word[2] + '\n')
                # Delete the source .scel once it has been converted
                os.remove(os.path.join(in_path, f))
            except Exception as e:
                print(e)

    3. Generate the word cloud. The script below fetches the novel's chapters from t.icesmall.cn, saves them to Ctxt.txt, segments the text with jieba (using the custom dictionary people.txt), filters stop words, prints the 20 most frequent words, and renders the word cloud.

    import requests
    from bs4 import BeautifulSoup
    from fake_useragent import UserAgent
    import re
    import jieba
    from wordcloud import WordCloud
    import matplotlib.pyplot as plt
    
    def get_txt_from_net():
        c_str = []
        ua = UserAgent()
        headers = {'User-Agent': ua.random}  # random UA to avoid naive blocking
        for i in range(1, 18):
            url = "http://t.icesmall.cn/book/53/826/" + str(i) + ".html"
            html = requests.get(url, headers=headers)
            html.encoding = 'utf-8'
            soup = BeautifulSoup(html.text, 'lxml')
            s = soup.find('div', id="Content").get_text()
            # Strip leftover inline CSS such as "p{...}" and surrounding whitespace
            s = re.sub(r'p{.*?}', '', s).strip()
            c_str.append(s)
        c_txt = ''.join(c_str)
        with open('Ctxt.txt', 'w', encoding='utf-8') as f:
            f.write(c_txt)
    
    def get_word_from_txt():
        with open('Ctxt.txt', 'r', encoding='utf-8') as f:
            ctxt = f.read()
        jieba.load_userdict('people.txt')  # custom dictionary (one word per line)
        stxt = jieba.lcut(ctxt)
        with open('停用词表.txt', 'r', encoding='utf-8') as f:
            stops = f.read().split()
        tokens = [token for token in stxt if token not in stops]
        tokenstr = " ".join(tokens)
        # Note: to render Chinese glyphs, WordCloud usually needs font_path
        # set to a CJK font (e.g. font_path='simhei.ttf')
        ciyun = WordCloud(background_color='#36f', width=400, height=300, margin=1).generate(tokenstr)
        stxtword = set(tokens)
        stxtcount = {}
        for i in stxtword:
            if len(i) == 1:  # skip single characters
                continue
            stxtcount[i] = tokens.count(i)
        stxtcount = sorted(stxtcount.items(), key=lambda item: item[1], reverse=True)
        stxtcount = stxtcount[:20]
        for item in stxtcount:
            print(item)
        plt.imshow(ciyun)
        plt.axis("off")
        plt.show()
        ciyun.to_file(r'The_Kite_Runner.jpg')
    
    if __name__ == '__main__':
        get_txt_from_net()
        get_word_from_txt()
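
    One note on the counting loop above: tokens.count(i) rescans the whole token list once per distinct word, which is quadratic. A sketch of the same top-20 computation with collections.Counter (the toy token list is illustrative):

    from collections import Counter

    tokens = ['风筝', '哈桑', '风筝', '了', '哈桑', '风筝']  # toy token list

    # One pass over the list instead of one full scan per distinct word
    counts = Counter(t for t in tokens if len(t) > 1)  # skip single characters
    for word, n in counts.most_common(20):
        print(word, n)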

    4. Update the word library with the specialized vocabulary of the text being analyzed.
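
    For this novel that mostly means adding character names so jieba does not split them apart. A minimal sketch of building and loading such a custom dictionary (the names and frequencies are illustrative assumptions, not taken from the original post; jieba.load_userdict accepts one word per line with optional frequency and POS tag):

    import jieba

    # Hypothetical people.txt entries for The Kite Runner
    entries = "阿米尔 100 nr\n哈桑 100 nr\n拉辛汗 50 nr\n"
    with open('people.txt', 'w', encoding='utf-8') as f:
        f.write(entries)

    jieba.load_userdict('people.txt')
    print(jieba.lcut("阿米尔和哈桑在放风筝"))  # the names now stay whole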

  • Original article: https://www.cnblogs.com/Winslow-liujie/p/10597009.html