Following the steps in the previous section, TensorFlow is installed by default under /usr/lib/python2.7/site-packages/tensorflow/ (on some systems it may be under /usr/local/lib…). Inspect the directory structure:
# tree -d -L 3 /usr/lib/python2.7/site-packages/tensorflow/
/usr/lib/python2.7/site-packages/tensorflow/
├── contrib
│   ├── bayesflow
│   │   └── python
│   ├── cmake
│   ├── copy_graph
│   │   └── python
│   ├── crf
│   │   └── python
│   ├── cudnn_rnn
│   │   ├── ops
│   │   └── python
│   ├── distributions
│   │   └── python
│   ├── factorization
│   │   └── python
│   ├── ffmpeg
│   │   └── ops
│   ├── framework
│   │   └── python
│   ├── graph_editor
│   ├── grid_rnn
│   │   └── python
│   ├── layers
│   │   ├── ops
│   │   └── python
│   ├── learn
│   │   └── python
│   ├── linear_optimizer
│   │   ├── ops
│   │   └── python
│   ├── lookup
│   ├── losses
│   │   └── python
│   ├── metrics
│   │   ├── ops
│   │   └── python
│   ├── opt
│   │   └── python
│   ├── quantization
│   │   ├── kernels
│   │   ├── ops
│   │   └── python
│   ├── rnn
│   │   └── python
│   ├── session_bundle
│   ├── slim
│   │   └── python
│   ├── tensorboard
│   │   └── plugins
│   ├── tensor_forest
│   │   ├── client
│   │   ├── data
│   │   ├── hybrid
│   │   └── python
│   ├── testing
│   │   └── python
│   ├── training
│   │   └── python
│   └── util
├── core
│   ├── example
│   ├── framework
│   ├── lib
│   │   └── core
│   ├── protobuf
│   └── util
├── examples
│   └── tutorials
│       └── mnist
├── include
│   ├── Eigen
│   │   └── src
│   ├── external
│   │   └── eigen_archive
│   ├── google
│   │   └── protobuf
│   ├── tensorflow
│   │   └── core
│   ├── third_party
│   │   └── eigen3
│   └── unsupported
│       └── Eigen
├── models
│   ├── embedding
│   ├── image
│   │   ├── alexnet
│   │   ├── cifar10
│   │   ├── imagenet
│   │   └── mnist
│   └── rnn
│       ├── ptb
│       └── translate
├── python
│   ├── client
│   ├── debug
│   │   └── cli
│   ├── framework
│   ├── lib
│   │   ├── core
│   │   └── io
│   ├── ops
│   ├── platform
│   ├── saved_model
│   ├── summary
│   │   └── impl
│   ├── training
│   ├── user_ops
│   └── util
│       └── protobuf
├── tensorboard
│   ├── backend
│   ├── dist
│   ├── lib
│   │   ├── css
│   │   └── python
│   └── plugins
│       └── projector
└── tools
    └── pip_package

119 directories
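Before going further, it is worth confirming which of the two candidate paths your installation actually uses. A minimal check in plain Python (`__file__` is a standard module attribute) prints the package location:

# Print where the installed tensorflow package actually lives on disk.
import tensorflow as tf
print(tf.__file__)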
The command used in the previous section to run the MNIST example was:
# python -m tensorflow.models.image.mnist.convolutional
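According to the module docstring (visible in the listing below), appending --self_test runs a short functional self-test on fake data instead of the real MNIST set:

# python -m tensorflow.models.image.mnist.convolutional --self_test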
The corresponding source file is /usr/lib/python2.7/site-packages/tensorflow/models/image/mnist/convolutional.py.
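To double-check that the module name really resolves to this file, you can import it and inspect its `__file__` attribute (a minimal sketch; importing only defines the module's functions and flags, it does not start training):

import tensorflow.models.image.mnist.convolutional as conv
# __file__ points at the .py (or .pyc) actually loaded for this module.
print(conv.__file__)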
Open the example's source code:
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.

This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import sys
import time

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

# Data source
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
# Working directory where the downloaded data is stored
WORK_DIRECTORY = 'data'
# MNIST dataset properties:
# image size is 28x28
IMAGE_SIZE = 28
# grayscale images
NUM_CHANNELS = 1
# pixel values range from 0 to 255
PIXEL_DEPTH = 255
# labels fall into 10 classes
NUM_LABELS = 10
# the validation set holds 5000 samples
VALIDATION_SIZE = 5000
# random seed; set to None for non-deterministic behavior
SEED = 66478
# training batch size is 64
BATCH_SIZE = 64
# the full dataset passes through the network 10 times
NUM_EPOCHS = 10
# evaluation batch size is also 64
EVAL_BATCH_SIZE = 64
# evaluation interval: run one evaluation every 100 training batches
EVAL_FREQUENCY = 100


tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS

# Skip the download if the data is already present
def maybe_download(filename):
  """Download the data from Yann's website, unless it's already here."""
  if not tf.gfile.Exists(WORK_DIRECTORY):
    tf.gfile.MakeDirs(WORK_DIRECTORY)
  filepath = os.path.join(WORK_DIRECTORY, filename)
  if not tf.gfile.Exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    with tf.gfile.GFile(filepath) as f:
      size = f.Size()
    print('Successfully downloaded', filename, size, 'bytes.')
  return filepath

# Extract the data into a 4D tensor [image index, y, x, channels],
# subtract the mean and normalize to the range [-0.5, 0.5]
def extract_data(filename, num_images):
  """Extract the images into a 4D tensor [image index, y, x, channels].

  Values are rescaled from [0, 255] down to [-0.5, 0.5].
  """
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(16)
    buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
    data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
    data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
    return data

# Extract the image labels
def extract_labels(filename, num_images):
  """Extract the labels into a vector of int64 label IDs."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(8)
    buf = bytestream.read(1 * num_images)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
  return labels

# Fake data, used for the functional self-test
def fake_data(num_images):
  """Generate a fake dataset that matches the dimensions of MNIST."""
  data = numpy.ndarray(
      shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
      dtype=numpy.float32)
  labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
  for image in xrange(num_images):
    label = image % 2
    data[image, :, :, 0] = label - 0.5
    labels[image] = label
  return data, labels

# Compute the classification error rate
def error_rate(predictions, labels):
  """Return the error rate based on dense predictions and sparse labels."""
  return 100.0 - (
      100.0 *
      numpy.sum(numpy.argmax(predictions, 1) == labels) /
      predictions.shape[0])


# Main function
def main(argv=None):  # pylint: disable=unused-argument
  if FLAGS.self_test:
    print('Running self-test.')
    train_data, train_labels = fake_data(256)
    validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
    test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
    num_epochs = 1
  else:
    # Download the data
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

    # Load the data into numpy arrays
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)

    # Generate the validation set
    validation_data = train_data[:VALIDATION_SIZE, ...]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, ...]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS
  train_size = train_labels.shape[0]

  # Training samples and labels are fed into the network from here.
  # At each training step, one batch of data is fed to the placeholder nodes.
  # Training data node
  train_data_node = tf.placeholder(
      tf.float32,
      shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  # Training label node
  train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
  # Evaluation data node
  eval_data = tf.placeholder(
      tf.float32,
      shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))

  # The variables below hold all the trainable weights of the network.
  # conv1 weights have shape 5 x 5 x channels x 32, where 32 is the number of feature maps
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED))
  # conv1 biases
  conv1_biases = tf.Variable(tf.zeros([32]))
  # conv2 weights have shape 5 x 5 x 32 x 64
  conv2_weights = tf.Variable(
      tf.truncated_normal([5, 5, 32, 64],
                          stddev=0.1,
                          seed=SEED))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
  # fully connected layer fc1 weights; 512 neurons
  fc1_weights = tf.Variable(  # fully connected, depth 512.
      tf.truncated_normal(
          [IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
          stddev=0.1,
          seed=SEED))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
  # fc2 weights; the output dimension equals the number of label classes
  fc2_weights = tf.Variable(
      tf.truncated_normal([512, NUM_LABELS],
                          stddev=0.1,
                          seed=SEED))
  fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))

  # Two networks, one for training and one for evaluation;
  # they share the same weights.

  # Implements the LeNet-5 model: takes data as input and returns the fc2 response.
  # The second parameter selects between the training and evaluation network.
  def model(data, train=False):
    """The Model definition."""
    # 2D convolution with shape-preserving 'SAME' zero padding
    # (the output feature map has the same spatial size as the input).
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias add and activation are applied in one step
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max-pooling downsampling
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Second convolutional layer
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature maps into a 2D matrix to feed the fully connected layer
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
    # Fully connected layer; note that '+' automatically broadcasts the biases
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # During training, apply 50% dropout; evaluation skips this step
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    return tf.matmul(hidden, fc2_weights) + fc2_biases

  # Training computation: logits + cross-entropy loss.
  logits = model(train_data_node, True)
  loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, train_labels_node))

  # Apply L2 regularization to the fully connected parameters
  regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                  tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
  # Add the regularization term to the loss
  loss += 5e-4 * regularizers

  # Optimizer: create a variable that is incremented once per batch
  # and controls the learning rate decay
  batch = tf.Variable(0)
  # Exponential decay
  learning_rate = tf.train.exponential_decay(
      0.01,                # Base learning rate.
      batch * BATCH_SIZE,  # Current position within the full dataset.
      train_size,          # Decay step.
      0.95,                # Decay rate.
      staircase=True)
  # Use simple momentum for the optimization.
  optimizer = tf.train.MomentumOptimizer(learning_rate,
                                         0.9).minimize(loss,
                                                       global_step=batch)

  # Compute the prediction probabilities for the training minibatch with softmax
  train_prediction = tf.nn.softmax(logits)

  # Compute the prediction probabilities for the evaluation minibatches with softmax
  eval_prediction = tf.nn.softmax(model(eval_data))

  # Small utility function to evaluate a dataset by feeding batches of data to
  # {eval_data} and pulling the results from {eval_predictions}.
  # Saves memory and enables this to run on smaller GPUs.
  def eval_in_batches(data, sess):
    """Get all predictions for a dataset by running it in small batches."""
    size = data.shape[0]
    if size < EVAL_BATCH_SIZE:
      raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
      end = begin + EVAL_BATCH_SIZE
      if end <= size:
        predictions[begin:end, :] = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[begin:end, ...]})
      else:
        batch_predictions = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
        predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions


  # Create a local session to run the training.
  start_time = time.time()
  with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    tf.initialize_all_variables().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
      batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the graph and fetch some of the nodes.
      _, l, lr, predictions = sess.run(
          [optimizer, loss, learning_rate, train_prediction],
          feed_dict=feed_dict)
      if step % EVAL_FREQUENCY == 0:
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print('Step %d (epoch %.2f), %.1f ms' %
              (step, float(step) * BATCH_SIZE / train_size,
               1000 * elapsed_time / EVAL_FREQUENCY))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
        print('Validation error: %.1f%%' % error_rate(
            eval_in_batches(validation_data, sess), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)

# Program entry point
if __name__ == '__main__':
  tf.app.run()
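One detail in the listing worth pausing on is the first dimension of fc1_weights, IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64. The 'SAME'-padded convolutions preserve the 28x28 spatial size, while each of the two 2x2, stride-2 max-pools halves it, so the feature maps entering the fully connected stage are 7x7 with 64 channels. A quick arithmetic check in plain Python:

# SAME-padded conv layers keep 28x28; each stride-2 max-pool halves it: 28 -> 14 -> 7.
IMAGE_SIZE = 28
side = IMAGE_SIZE // 2 // 2        # spatial size after the two pooling layers: 7
fc1_inputs = side * side * 64      # flattened conv2 output: 7 * 7 * 64 = 3136
assert fc1_inputs == IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64
print(fc1_inputs)                  # 3136, the row count of fc1_weights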