• CAPTCHA recognition with TensorFlow deep learning


    This project shows how to use deep learning (TensorFlow) to recognize CAPTCHAs (4-character CAPTCHAs here; you can generate CAPTCHAs of any length yourself and train on them).

    [The GitHub address of the project's source code is given at the bottom; just clone it with git. You have to train the model yourself: the model files are large and are not uploaded.]

    The CAPTCHAs themselves are generated programmatically and used as the training set; the model is then trained and saved, and finally evaluated on a test set to determine its accuracy.

    The first step is generating the CAPTCHA images (they are split into training and test sets later):

    # coding: utf-8
    
    # In[1]:
    
    # CAPTCHA generation library
    from captcha.image import ImageCaptcha  # pip install captcha
    import numpy as np
    from PIL import Image
    import random
    import sys
     
    number = ['0','1','2','3','4','5','6','7','8','9']
    # alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
    # ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
    
    def random_captcha_text(char_set=number, captcha_size=4):
        # list of CAPTCHA characters
        captcha_text = []
        for i in range(captcha_size):
            # pick a random character
            c = random.choice(char_set)
            # append it to the list
            captcha_text.append(c)
        return captcha_text
     
    # generate the CAPTCHA image for a random text
    def gen_captcha_text_and_image():
        image = ImageCaptcha()
        # get a random CAPTCHA text
        captcha_text = random_captcha_text()
        # join the character list into a string
        captcha_text = ''.join(captcha_text)
        # generate the CAPTCHA image in memory
        captcha = image.generate(captcha_text)
        image.write(captcha_text, '../image/' + captcha_text + '.jpg')  # render and write to a file named after the text
    
    # fewer than 10000 distinct images are produced, because duplicate texts overwrite the same file
    num = 10000
    if __name__ == '__main__':
        for i in range(num):
            gen_captcha_text_and_image()
            sys.stdout.write('\r>> Creating image %d/%d' % (i+1, num))
            sys.stdout.flush()
        sys.stdout.write('\n')
        sys.stdout.flush()
                            
        print("Done generating images")
    
    

    Set and adjust the file paths and the dataset size yourself. This script only creates numeric CAPTCHAs; to include upper- and lower-case letters, or to change the number of characters, adjust the character set and captcha_size accordingly, as in the sketch below.
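
    For reference, a minimal sketch of generating CAPTCHAs that mix digits with upper- and lower-case letters (the 6-character length and output directory here are just illustrative assumptions):

    from captcha.image import ImageCaptcha  # pip install captcha
    import random
    import string

    # assumed character set: 0-9, a-z, A-Z
    char_set = list(string.digits + string.ascii_lowercase + string.ascii_uppercase)

    def gen_one(output_dir='../image/', captcha_size=6):
        # build a random text of the chosen length and write the rendered image to disk
        text = ''.join(random.choice(char_set) for _ in range(captcha_size))
        ImageCaptcha().write(text, output_dir + text + '.jpg')
        return text

    Note that once letters are included, the labels can no longer be parsed with int() in the conversion script below; a character-to-index mapping is needed, and on case-insensitive file systems mixed-case texts can overwrite each other.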

    The next step is converting these files into a different format (turning the pile of image files into tfrecord files):

    # coding: utf-8
    
    # In[1]:
    
    import tensorflow as tf
    import os
    import random
    import math
    import sys
    from PIL import Image
    import numpy as np
    
    
    # In[2]:
    
    # number of examples held out for the test set
    _NUM_TEST = 500

    # random seed
    _RANDOM_SEED = 0

    # dataset (image) directory
    DATASET_DIR = "../image/"

    # directory for the tfrecord files
    TFRECORD_DIR = "../captcha/"
    
    
    # check whether the tfrecord files already exist
    def _dataset_exists(dataset_dir):
        for split_name in ['train', 'test']:
            output_filename = os.path.join(dataset_dir,split_name + '.tfrecords')
            if not tf.gfile.Exists(output_filename):
                return False
        return True
    
    # collect the paths of all CAPTCHA images
    def _get_filenames_and_classes(dataset_dir):
        photo_filenames = []
        for filename in os.listdir(dataset_dir):
            # full path of the image file
            path = os.path.join(dataset_dir, filename)
            photo_filenames.append(path)
        return photo_filenames
    
    def int64_feature(values):
        if not isinstance(values, (tuple, list)):
            values = [values]
        return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
    
    def bytes_feature(values):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
    
    def image_to_tfexample(image_data, label0, label1, label2, label3):
        #Abstract base class for protocol messages.
        return tf.train.Example(features=tf.train.Features(feature={
          'image': bytes_feature(image_data),
          'label0': int64_feature(label0),
          'label1': int64_feature(label1),
          'label2': int64_feature(label2),
          'label3': int64_feature(label3),
        }))
    
    # convert the data to TFRecord format
    def _convert_dataset(split_name, filenames, dataset_dir):
        assert split_name in ['train', 'test']
    
        with tf.Session() as sess:
            # path + name of the tfrecord file
            output_filename = os.path.join(TFRECORD_DIR,split_name + '.tfrecords')
            with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                for i,filename in enumerate(filenames):
                    try:
                        sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
                        sys.stdout.flush()
    
                        # read the image
                        image_data = Image.open(filename)
                        # resize to match the network input
                        image_data = image_data.resize((224, 224))
                        # convert to grayscale
                        image_data = np.array(image_data.convert('L'))
                        # convert the image to raw bytes
                        image_data = image_data.tobytes()

                        # the label is the first 4 characters of the file name
                        labels = filename.split('/')[-1][0:4]
                        num_labels = []
                        for j in range(4):
                            num_labels.append(int(labels[j]))
                                                
                        # build the protocol buffer Example
                        example = image_to_tfexample(image_data, num_labels[0], num_labels[1], num_labels[2], num_labels[3])
                        tfrecord_writer.write(example.SerializeToString())
                        
                    except IOError as e:
                        print('Could not read:',filename)
                        print('Error:',e)
                        print('Skip it\n')
        sys.stdout.write('\n')
        sys.stdout.flush()
    
    # check whether the tfrecord files already exist
    if _dataset_exists(TFRECORD_DIR):
        print('tfrecord files already exist')
    else:
        # collect all image paths
        photo_filenames = _get_filenames_and_classes(DATASET_DIR)

        # shuffle and split the data into training and test sets
        random.seed(_RANDOM_SEED)
        random.shuffle(photo_filenames)
        training_filenames = photo_filenames[_NUM_TEST:]
        testing_filenames = photo_filenames[:_NUM_TEST]

        # convert the data
        _convert_dataset('train', training_filenames, DATASET_DIR)
        _convert_dataset('test', testing_filenames, DATASET_DIR)
        print('tfrecord files generated')
    
    

    Set the test set size (_NUM_TEST) as needed.
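
    To sanity-check the conversion, a quick sketch (the path is an assumption matching TFRECORD_DIR above) that reads one record back and prints its labels:

    import tensorflow as tf

    def peek_tfrecord(path="../captcha/test.tfrecords"):
        # iterate over the serialized records and parse the first one
        for record in tf.python_io.tf_record_iterator(path):
            example = tf.train.Example()
            example.ParseFromString(record)
            feats = example.features.feature
            labels = [feats['label%d' % i].int64_list.value[0] for i in range(4)]
            image_bytes = feats['image'].bytes_list.value[0]
            # each image was resized to 224x224 grayscale, so expect 224*224 = 50176 bytes
            print('labels:', labels, 'image bytes:', len(image_bytes))
            break

    peek_tfrecord()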

    Then comes training on these images:

    import os
    import tensorflow as tf
    from PIL import Image
    from lib.nets2 import nets_factory
    import numpy as np
    
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'   # make only the first GPU visible
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5  # use at most 50% of the GPU's memory
    config.gpu_options.allow_growth = True      # allocate GPU memory on demand
    sess = tf.Session(config = config)



    # number of distinct characters
    CHAR_SET_LEN = 10
    # image height
    IMAGE_HEIGHT = 60
    # image width
    IMAGE_WIDTH = 160
    # batch size
    BATCH_SIZE = 28
    # path of the tfrecord file
    TFRECORD_FILE = "C:/workspace/Python/deep-learning/cardC/images/train.tfrecords"
    
    # placeholder
    x = tf.placeholder(tf.float32, [None, 224, 224])
    y0 = tf.placeholder(tf.float32, [None])
    
    # learning rate
    lr = tf.Variable(0.001, dtype=tf.float32)
    
    
    # read data from the tfrecord file
    def read_and_decode(filename):
        # create a queue from the file name
        filename_queue = tf.train.string_input_producer([filename])
        reader = tf.TFRecordReader()
        # returns the file name and the serialized example
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'image' : tf.FixedLenFeature([], tf.string),
                                               'label0': tf.FixedLenFeature([], tf.int64),
                                           })
        # decode the image data
        image = tf.decode_raw(features['image'], tf.uint8)
        # tf.train.shuffle_batch requires a fixed shape
        image = tf.reshape(image, [224, 224])
        # preprocessing: scale the image to [-1, 1]
        image = tf.cast(image, tf.float32) / 255.0
        image = tf.subtract(image, 0.5)
        image = tf.multiply(image, 2.0)
        # get the label
        label0 = tf.cast(features['label0'], tf.int32)
        return image, label0
    
    
    # get the image data and label
    image, label0 = read_and_decode(TFRECORD_FILE)
    
    # shuffle_batch shuffles the examples
    image_batch, label_batch0= tf.train.shuffle_batch(
        [image, label0], batch_size=BATCH_SIZE,
        capacity=50000, min_after_dequeue=10000, num_threads=1)
    
    # define the network structure
    train_network_fn = nets_factory.get_network_fn(
        'alexnet_v2',
        num_classes=CHAR_SET_LEN * 1,
        weight_decay=0.0005,
        is_training=True)
    
    with tf.Session() as sess:
        # inputs: a tensor of size [batch_size, height, width, channels]
        X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
        # feed the data through the network to get the output
        logits, end_points = train_network_fn(X)
    
        # convert the label to one-hot form
        one_hot_labels0 = tf.one_hot(indices=tf.cast(y0, tf.int32), depth=CHAR_SET_LEN)

        # concatenate the one-hot labels (length 10 here; it would be 40 if all four characters were trained)
        label_40 = tf.concat([one_hot_labels0], 1)
        # compute the loss
        loss_40 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=label_40))
        # optimize the loss
        optimizer_40 = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_40)
        # compute the accuracy
        correct_prediction_40 = tf.equal(tf.argmax(label_40, 1), tf.argmax(logits, 1))
        accuracy_40 = tf.reduce_mean(tf.cast(correct_prediction_40, tf.float32))
    
        # saver for the model
        saver = tf.train.Saver()
        # initialize variables
        sess.run(tf.global_variables_initializer())

        # create a coordinator to manage the threads
        coord = tf.train.Coordinator()
        # start the QueueRunners; the filename queue is now populated
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
        for i in range(3500):
            # fetch one batch of data and labels
            b_image, b_label0 = sess.run([image_batch, label_batch0])
            # optimization step
            sess.run(optimizer_40, feed_dict={x: b_image, y0: b_label0})
            # compute the loss and accuracy every 4 iterations
            if i % 4 == 0:
                # lower the learning rate every 400 iterations
                if i % 400 == 0:
                    sess.run(tf.assign(lr, lr / 3))
                acc, loss_ = sess.run([accuracy_40, loss_40], feed_dict={x: b_image,
                                                                         y0: b_label0})
                learning_rate = sess.run(lr)
                print("Iter:%d  Loss:%.4f  Accuracy:%.4f  Learning_rate:%.4f" % (i, loss_, acc, learning_rate))
    
                #             acc0,acc1,acc2,acc3,loss_ = sess.run([accuracy0,accuracy1,accuracy2,accuracy3,total_loss],feed_dict={x: b_image,
                #                                                                                                                 y0: b_label0,
                #                                                                                                                 y1: b_label1,
                #                                                                                                                 y2: b_label2,
                #                                                                                                                 y3: b_label3})
                #             learning_rate = sess.run(lr)
                #             print ("Iter:%d  Loss:%.3f  Accuracy:%.2f,%.2f,%.2f,%.2f  Learning_rate:%.4f" % (i,loss_,acc0,acc1,acc2,acc3,learning_rate))
    
                # save the model
                if i == 3300:
                    saver.save(sess, "C:/workspace/Python/deep-learning/cardC/models/crack_captcha1.model", global_step=i)
                    break
        # ask the other threads to stop
        coord.request_stop()
        # this call returns only after all other threads have stopped
        coord.join(threads)

    This trains on the images and saves the model (the number of training iterations and the learning-rate schedule can be adjusted to the size of your dataset).
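
    The script above only trains on the first character (label0). A minimal sketch of the loss and accuracy for all four characters (not the repository's exact code: it assumes a network factory that returns four logits tensors, logits0 through logits3, as the test script below expects, and it uses per-character softmax cross-entropy instead of the sigmoid loss above):

    import tensorflow as tf

    CHAR_SET_LEN = 10  # digits 0-9

    def build_four_char_train_ops(logits_list, label_placeholders, learning_rate):
        # logits_list: [logits0, logits1, logits2, logits3], each shaped [batch, CHAR_SET_LEN]
        # label_placeholders: [y0, y1, y2, y3], each shaped [batch] with integer class ids
        losses, accuracies = [], []
        for logits, y in zip(logits_list, label_placeholders):
            one_hot = tf.one_hot(tf.cast(y, tf.int32), depth=CHAR_SET_LEN)
            losses.append(tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot)))
            correct = tf.equal(tf.argmax(one_hot, 1), tf.argmax(logits, 1))
            accuracies.append(tf.reduce_mean(tf.cast(correct, tf.float32)))
        # average the four per-character losses and optimize them jointly
        total_loss = tf.add_n(losses) / 4.0
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
        return train_op, total_loss, accuracies

    The commented-out lines in the training script correspond to this four-accuracy version.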

    Finally, test the model's accuracy on the test set:

    # coding: utf-8
    
    # In[1]:
    
    import os
    import tensorflow as tf 
    from PIL import Image
    from nets import nets_factory
    import numpy as np
    import matplotlib.pyplot as plt  
    
    
    # In[2]:
    
    # number of distinct characters
    CHAR_SET_LEN = 10
    # image height
    IMAGE_HEIGHT = 60
    # image width
    IMAGE_WIDTH = 160
    # batch size
    BATCH_SIZE = 1
    # path of the tfrecord file
    TFRECORD_FILE = "D:/Tensorflow/captcha/test.tfrecords"
    
    # placeholder
    x = tf.placeholder(tf.float32, [None, 224, 224])  
    
    # read data from the tfrecord file
    def read_and_decode(filename):
        # create a queue from the file name
        filename_queue = tf.train.string_input_producer([filename])
        reader = tf.TFRecordReader()
        # returns the file name and the serialized example
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'image' : tf.FixedLenFeature([], tf.string),
                                               'label0': tf.FixedLenFeature([], tf.int64),
                                               'label1': tf.FixedLenFeature([], tf.int64),
                                               'label2': tf.FixedLenFeature([], tf.int64),
                                               'label3': tf.FixedLenFeature([], tf.int64),
                                           })
        # decode the image data
        image = tf.decode_raw(features['image'], tf.uint8)
        # keep an unpreprocessed grayscale copy for display
        image_raw = tf.reshape(image, [224, 224])
        # tf.train.shuffle_batch requires a fixed shape
        image = tf.reshape(image, [224, 224])
        # preprocessing: scale the image to [-1, 1]
        image = tf.cast(image, tf.float32) / 255.0
        image = tf.subtract(image, 0.5)
        image = tf.multiply(image, 2.0)
        # get the labels
        label0 = tf.cast(features['label0'], tf.int32)
        label1 = tf.cast(features['label1'], tf.int32)
        label2 = tf.cast(features['label2'], tf.int32)
        label3 = tf.cast(features['label3'], tf.int32)
    
        return image, image_raw, label0, label1, label2, label3
    
    
    # In[3]:
    
    # get the image data and labels
    image, image_raw, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)
    
    # shuffle_batch shuffles the examples
    image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
            [image, image_raw, label0, label1, label2, label3], batch_size = BATCH_SIZE,
            capacity = 50000, min_after_dequeue=10000, num_threads=1)
    
    # define the network structure
    train_network_fn = nets_factory.get_network_fn(
        'alexnet_v2',
        num_classes=CHAR_SET_LEN,
        weight_decay=0.0005,
        is_training=False)
    
    with tf.Session() as sess:
        # inputs: a tensor of size [batch_size, height, width, channels]
        X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
        # feed the data through the network to get the outputs
        logits0,logits1,logits2,logits3,end_points = train_network_fn(X)
        
        # predicted values
        predict0 = tf.reshape(logits0, [-1, CHAR_SET_LEN])  
        predict0 = tf.argmax(predict0, 1)  
    
        predict1 = tf.reshape(logits1, [-1, CHAR_SET_LEN])  
        predict1 = tf.argmax(predict1, 1)  
    
        predict2 = tf.reshape(logits2, [-1, CHAR_SET_LEN])  
        predict2 = tf.argmax(predict2, 1)  
    
        predict3 = tf.reshape(logits3, [-1, CHAR_SET_LEN])  
        predict3 = tf.argmax(predict3, 1)  
    
        # initialize variables
        sess.run(tf.global_variables_initializer())
        # load the trained model
        saver = tf.train.Saver()
        saver.restore(sess,'./captcha/models/crack_captcha.model-6000')

        # create a coordinator to manage the threads
        coord = tf.train.Coordinator()
        # start the QueueRunners; the filename queue is now populated
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
        for i in range(10):
            # fetch one batch of data and labels
            b_image, b_image_raw, b_label0, b_label1 ,b_label2 ,b_label3 = sess.run([image_batch, 
                                                                        image_raw_batch, 
                                                                        label_batch0, 
                                                                        label_batch1, 
                                                                        label_batch2, 
                                                                        label_batch3])
            # display the image
            img=Image.fromarray(b_image_raw[0],'L')
            plt.imshow(img)
            plt.axis('off')
            plt.show()
            # print the true labels
            print('label:',b_label0, b_label1 ,b_label2 ,b_label3)
            # run the prediction
            label0,label1,label2,label3 = sess.run([predict0,predict1,predict2,predict3], feed_dict={x: b_image})
            # print the predicted values
            print('predict:',label0,label1,label2,label3) 
                    
        # ask the other threads to stop
        coord.request_stop()
        # this call returns only after all other threads have stopped
        coord.join(threads)
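
    The loop above only prints ten labels and predictions. To actually estimate the model's accuracy, here is a sketch that continues from the session and tensors defined above (it belongs inside the with block, before coord.request_stop(); since shuffle_batch samples from a shuffled queue, this is an estimate rather than an exact pass over the 500 test images):

    # assumed to run inside the `with tf.Session() as sess:` block above, before coord.request_stop()
    num_test = 500  # _NUM_TEST from the conversion script
    correct = 0
    for i in range(num_test):
        b_image, b_l0, b_l1, b_l2, b_l3 = sess.run(
            [image_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        p0, p1, p2, p3 = sess.run([predict0, predict1, predict2, predict3],
                                  feed_dict={x: b_image})
        # count only CAPTCHAs where all four characters are predicted correctly
        if p0[0] == b_l0[0] and p1[0] == b_l1[0] and p2[0] == b_l2[0] and p3[0] == b_l3[0]:
            correct += 1
    print('exact-match accuracy: %.4f' % (correct / float(num_test)))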

     The source code can be downloaded from GitHub: https://github.com/H-Designer/Tensorflow-Verification_Code



  • Original post: https://www.cnblogs.com/zhaochunhui/p/11323940.html