• DBN with a custom image input pipeline: source code (Python 3, runs as-is; personally tested)


    Code:

    # urllib is used to download the utils file from deeplearning.net
    # (it is fetched for completeness but never imported below)
    import urllib.request
    response = urllib.request.urlopen('http://deeplearning.net/tutorial/code/utils.py')
    content = response.read()
    target = open('utils.py', 'wb+')
    target.write(content)
    target.close()
    # Import the math module for calculations
    import math
    # TensorFlow library, used to implement machine learning models
    import tensorflow as tf
    # NumPy contains helpful functions for efficient mathematical calculations
    import numpy as np
    # Image library (Pillow) for image manipulation
    from PIL import Image
    # os is used to list the image directories
    import os

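    # Note (added): this script is written against the TensorFlow 1.x graph API
    # (placeholders, sessions, queue runners); see the compatibility note after
    # the listing if you are running TensorFlow 2.x.
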
    def create():
        cwd = './data/'

        '''
        The data directory I load here is laid out as follows:
        0 -- img1.jpg
             img2.jpg
             img3.jpg
             ...
        1 -- img1.jpg
             img2.jpg
             ...
        2 -- ...
        The 0, 1, 2 ... are the class labels, i.e. the `classes` below.
        `classes` is a list I defined for my own data; adapt it flexibly
        to your own dataset.
        ...
        '''

        writer = tf.python_io.TFRecordWriter("train.tfrecords")
        classes = ['1', '2']
        print(classes)
        class_path_img = './data/1/'
        class_path_label = './data/2/'
        for img_name in os.listdir(class_path_img):
            img_path = class_path_img + img_name
            print(img_path)
            img = Image.open(img_path)
            img = img.resize((125, 125))
            img_raw = img.tobytes()  # convert the image to raw bytes

            img_label_path = class_path_label + img_name
            img_label = Image.open(img_label_path)
            img_label = img_label.resize((125, 125))
            img_label_raw = img_label.tobytes()
            example = tf.train.Example(features=tf.train.Features(feature={
                "label": tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_label_raw])),
                'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
            }))
            writer.write(example.SerializeToString())  # serialize to a string
        writer.close()
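
    # Note (added): create() is never called in this script; train.tfrecords must
    # already exist when read_and_decode() runs below. On a first run you could,
    # for example, guard it with:
    # if not os.path.exists('train.tfrecords'):
    #     create()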

    def read_and_decode(filename):
        # Build a queue from the file name
        filename_queue = tf.train.string_input_producer([filename])

        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)  # returns the file name and the file
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'label': tf.FixedLenFeature([], tf.string),
                                               'img_raw': tf.FixedLenFeature([], tf.string),
                                           })

        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [125, 125, 3])
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5

        # The "label" here is itself an image, decoded the same way as img
        label = tf.decode_raw(features['label'], tf.uint8)
        label = tf.reshape(label, [125, 125, 3])
        label = tf.cast(label, tf.float32) * (1. / 255) - 0.5

        return img, label

    img, label = read_and_decode("train.tfrecords")
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size=3, capacity=2000,
                                                    min_after_dequeue=1000)
    print(img)
    init = tf.global_variables_initializer()
    print(img_batch)
    with tf.Session() as sess:
        sess.run(init)
        threads = tf.train.start_queue_runners(sess=sess)
        for i in range(1):
            val, lab = sess.run([img_batch, label_batch])
            # val and lab can be post-processed here as needed
            # l = to_categorical(l, 12)
            val = val.reshape((-1, (125 * 125 * 3)))
            lab = lab.reshape((-1, (125 * 125 * 3)))
            trX = val
            trY = lab
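
    # Note (added): shuffle_batch keeps an internal buffer of up to `capacity`
    # examples and never lets it drop below `min_after_dequeue`; that buffer is
    # what provides the shuffling. Each sess.run() above yields arrays of shape
    # (3, 125, 125, 3), flattened here to (3, 46875). Also note that only these
    # 3 examples become trX/trY, while the RBM/NN batch size below is 100, so
    # their inner training loops perform no updates unless more data is read.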

    '''
    # Alternatively, load the MNIST data provided by TensorFlow
    # (the input used by the original tutorial):
    old_v = tf.compat.v1.logging.get_verbosity()
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    from tensorflow.examples.tutorials.mnist import input_data

    # Loading in the mnist data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    tf.compat.v1.logging.set_verbosity(old_v)
    trX, trY, teX, teY = (mnist.train.images, mnist.train.labels,
                          mnist.test.images, mnist.test.labels)
    '''


    # Build the RBM layer

    # Class that defines the behavior of the RBM
    class RBM(object):

        def __init__(self, input_size, output_size):
            # Defining the hyperparameters
            self._input_size = input_size    # Size of input
            self._output_size = output_size  # Size of output
            self.epochs = 5                  # Number of training iterations
            self.learning_rate = 1.0         # The step size used in gradient descent
            self.batchsize = 100             # How much data is used for training per sub-iteration

            # Initializing weights and biases as matrices full of zeroes
            self.w = np.zeros([input_size, output_size], np.float32)  # weights, initialized to 0
            self.hb = np.zeros([output_size], np.float32)             # hidden biases, initialized to 0
            self.vb = np.zeros([input_size], np.float32)              # visible biases, initialized to 0

        # Fits the result from the weighted visible layer plus the bias into a sigmoid curve
        def prob_h_given_v(self, visible, w, hb):
            # Sigmoid
            return tf.nn.sigmoid(tf.matmul(visible, w) + hb)

        # Fits the result from the weighted hidden layer plus the bias into a sigmoid curve
        def prob_v_given_h(self, hidden, w, vb):
            return tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(w)) + vb)

        # Draw a binary sample from the given probabilities
        def sample_prob(self, probs):
            return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))
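
        # Note (added): sign(probs - u) with u ~ Uniform(0, 1) is +1 with
        # probability probs and -1 otherwise; relu then maps the -1s to 0, so
        # this returns a Bernoulli(probs) sample of binary unit states.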

        # Training method for the model (one-step contrastive divergence)
        def train(self, X):
            # Create the placeholders for our parameters
            _w = tf.placeholder("float", [self._input_size, self._output_size])
            _hb = tf.placeholder("float", [self._output_size])
            _vb = tf.placeholder("float", [self._input_size])

            prv_w = np.zeros([self._input_size, self._output_size], np.float32)  # previous weights, initialized to 0
            prv_hb = np.zeros([self._output_size], np.float32)                   # previous hidden biases, initialized to 0
            prv_vb = np.zeros([self._input_size], np.float32)                    # previous visible biases, initialized to 0

            print("_w", _w.shape)
            print("_hb", _hb.shape)
            print("_vb", _vb.shape)
            print("prv_w", prv_w.shape)
            print("prv_hb", prv_hb.shape)
            print("prv_vb", prv_vb.shape)

            cur_w = np.zeros([self._input_size, self._output_size], np.float32)
            cur_hb = np.zeros([self._output_size], np.float32)
            cur_vb = np.zeros([self._input_size], np.float32)
            v0 = tf.placeholder("float", [None, self._input_size])

            # Initialize with sample probabilities
            h0 = self.sample_prob(self.prob_h_given_v(v0, _w, _hb))
            v1 = self.sample_prob(self.prob_v_given_h(h0, _w, _vb))
            h1 = self.prob_h_given_v(v1, _w, _hb)

            # Create the gradients
            positive_grad = tf.matmul(tf.transpose(v0), h0)
            negative_grad = tf.matmul(tf.transpose(v1), h1)

            # Parameter update rules for the weights and biases
            update_w = _w + self.learning_rate * (positive_grad - negative_grad) / tf.to_float(tf.shape(v0)[0])
            update_vb = _vb + self.learning_rate * tf.reduce_mean(v0 - v1, 0)
            update_hb = _hb + self.learning_rate * tf.reduce_mean(h0 - h1, 0)
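
            # Note (added): this is the standard CD-1 update. The positive-phase
            # statistics come from the data (v0, h0), the negative phase from a
            # single Gibbs reconstruction step (v1, h1); their difference,
            # normalized by the batch size, approximates the log-likelihood gradient.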

            # Find the reconstruction error
            err = tf.reduce_mean(tf.square(v0 - v1))

            # Training loop
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                # For each epoch
                for epoch in range(self.epochs):
                    # For each step/batch
                    for start, end in zip(range(0, len(X), self.batchsize),
                                          range(self.batchsize, len(X), self.batchsize)):
                        batch = X[start:end]

                        # Update the parameters
                        cur_w = sess.run(update_w, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
                        cur_hb = sess.run(update_hb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
                        cur_vb = sess.run(update_vb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
                        prv_w = cur_w
                        prv_hb = cur_hb
                        prv_vb = cur_vb
                    error = sess.run(err, feed_dict={v0: X, _w: cur_w, _vb: cur_vb, _hb: cur_hb})
                    print('Epoch: %d' % epoch, 'reconstruction error: %f' % error)
                self.w = prv_w
                self.hb = prv_hb
                self.vb = prv_vb

        # Create the expected output for our DBN
        def rbm_outpt(self, X):
            input_X = tf.constant(X)
            _w = tf.constant(self.w)
            _hb = tf.constant(self.hb)
            out = tf.nn.sigmoid(tf.matmul(input_X, _w) + _hb)
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                return sess.run(out)

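    # Note (added): rbm_outpt() simply propagates the data through the trained
    # layer, sigmoid(matmul(X, w) + hb); its activations are then used as the
    # training input for the next RBM in the stack.
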
    # Build the DBN

    RBM_hidden_sizes = [500, 200, 50]  # three stacked RBMs: 500 -> 200 -> 50 hidden units

    # Since we are training, set the input as the training data
    inpX = trX

    # Create a list to hold our RBMs
    rbm_list = []

    # The size of the inputs is the number of features in the training set
    print("Training data shape:")
    print(inpX.shape)
    input_size = inpX.shape[1]

    # For each RBM we want to generate
    for i, size in enumerate(RBM_hidden_sizes):
        print('RBM: ', i, ' ', input_size, '->', size)
        rbm_list.append(RBM(input_size, size))
        input_size = size

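    # Note (added): the loop above only constructs the RBMs; they are trained
    # greedily, one layer at a time, in the __main__ block at the bottom, and
    # the learned weights/biases are then copied into the NN below for
    # supervised fine-tuning.
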
    # The fine-tuning neural network

    class NN(object):

        def __init__(self, sizes, X, Y):
            # Initialize hyperparameters
            self._sizes = sizes
            self._X = X
            self._Y = Y
            self.w_list = []
            self.b_list = []
            self._learning_rate = 1.0
            self._momentum = 0.0
            self._epoches = 10
            self._batchsize = 100
            input_size = X.shape[1]

            # Initialization loop
            for size in self._sizes + [Y.shape[1]]:
                # Define the upper limit for the uniform distribution range
                max_range = 4 * math.sqrt(6. / (input_size + size))

                # Initialize weights from a random uniform distribution
                self.w_list.append(
                    np.random.uniform(-max_range, max_range, [input_size, size]).astype(np.float32))

                # Initialize biases as zeroes
                self.b_list.append(np.zeros([size], np.float32))
                input_size = size

        # Load weights and biases from the pre-trained RBMs
        def load_from_rbms(self, dbn_sizes, rbm_list):
            # Check that the expected sizes are correct
            assert len(dbn_sizes) == len(self._sizes)

            for i in range(len(self._sizes)):
                # Check that the expected size of each RBM is correct
                assert dbn_sizes[i] == self._sizes[i]

            # If everything is correct, bring over the weights and biases
            for i in range(len(self._sizes)):
                self.w_list[i] = rbm_list[i].w
                self.b_list[i] = rbm_list[i].hb

        # Training method
        def train(self):
            # Create placeholders for input, weights, biases, output
            _a = [None] * (len(self._sizes) + 2)
            _w = [None] * (len(self._sizes) + 1)
            _b = [None] * (len(self._sizes) + 1)
            _a[0] = tf.placeholder("float", [None, self._X.shape[1]])
            y = tf.placeholder("float", [None, self._Y.shape[1]])
            print("Network input/output widths:")
            print(self._X.shape[1])
            print(self._Y.shape[1])
            # Define variables and the activation function
            for i in range(len(self._sizes) + 1):
                _w[i] = tf.Variable(self.w_list[i])
                _b[i] = tf.Variable(self.b_list[i])
            for i in range(1, len(self._sizes) + 2):
                _a[i] = tf.nn.sigmoid(tf.matmul(_a[i - 1], _w[i - 1]) + _b[i - 1])

            # Define the cost function
            cost = tf.reduce_mean(tf.square(_a[-1] - y))

            # Define the training operation (momentum optimizer minimizing the cost function)
            train_op = tf.train.MomentumOptimizer(
                self._learning_rate, self._momentum).minimize(cost)

            # Prediction operation
            predict_op = tf.argmax(_a[-1], 1)

            # Training loop
            with tf.Session() as sess:
                # Initialize variables
                sess.run(tf.global_variables_initializer())

                # For each epoch
                for i in range(self._epoches):

                    # For each step
                    for start, end in zip(
                            range(0, len(self._X), self._batchsize),
                            range(self._batchsize, len(self._X), self._batchsize)):

                        # Run the training operation on the input data
                        sess.run(train_op, feed_dict={
                            _a[0]: self._X[start:end], y: self._Y[start:end]})
                    for j in range(len(self._sizes) + 1):
                        # Retrieve weights and biases
                        self.w_list[j] = sess.run(_w[j])
                        self.b_list[j] = sess.run(_b[j])

                    print("Accuracy rating for epoch " + str(i) + ": " +
                          str(np.mean(np.argmax(self._Y, axis=1) ==
                                      sess.run(predict_op, feed_dict={_a[0]: self._X, y: self._Y}))))
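
                # Note (added): this "accuracy" compares argmax positions, which
                # is meaningful for one-hot labels (e.g. MNIST) but only loosely
                # indicative when trY is a flattened label image, as produced by
                # the TFRecord pipeline above.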


    if __name__ == '__main__':
        ## Train on the dataset
        # For each RBM in our list
        for rbm in rbm_list:
            print('New RBM:')
            # Train a new one
            rbm.train(inpX)
            # Return the output layer
            inpX = rbm.rbm_outpt(inpX)

        print("Training...")
        nNet = NN(RBM_hidden_sizes, trX, trY)
        nNet.load_from_rbms(RBM_hidden_sizes, rbm_list)
        nNet.train()
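
The listing above is written against the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.string_input_producer, tf.python_io.TFRecordWriter), most of which no longer exists under the plain tf namespace in TensorFlow 2.x. A minimal sketch of the usual compatibility shim, assuming a TensorFlow 2.x installation, is to swap the import at the top of the script; the rest of the code should then run largely unchanged:

    # Compatibility sketch (assumption: TensorFlow 2.x is installed).
    # Replaces "import tensorflow as tf" at the top of the script.
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()  # restore graph mode, placeholders, and queue runners

Note that the queue-runner input pipeline (string_input_producer / shuffle_batch) is deprecated even under compat.v1; tf.data is its replacement, but migrating the pipeline is beyond the scope of this post.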
• Original article: https://www.cnblogs.com/smartisn/p/12445518.html