#!/usr/bin/env python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress unnecessary TensorFlow info/warning output
import tensorflow as tf
import numpy as np
# Generate training samples: y = 2x + 10 plus Gaussian noise
train_X = np.linspace(-1,1,100)
train_Y = 2*train_X + np.random.randn(*train_X.shape)*0.33 +10
# Define the training model: linear prediction X*w + b with squared-error loss
X = tf.placeholder("float")
Y = tf.placeholder("float")
w = tf.Variable(0.0,name = 'weight')
b = tf.Variable(0.0,name = 'bias')
loss = tf.square(Y - X*w - b)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Start training
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    epoch = 1
    for i in range(10):
        for (x, y) in zip(train_X, train_Y):
            _, w_value, b_value = sess.run([train_op, w, b], feed_dict={X: x, Y: y})
        print('Epoch: {}, w: {}, b: {}'.format(epoch, w_value, b_value))
        epoch += 1
There are still many parts of this program I haven't fully figured out, so for now I'm just running the example to see how it performs. Judging from the output, the trained parameters end up close to the preset data (roughly w = 2, b = 10).
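As a quick sanity check (not part of the original post; a minimal sketch that regenerates the same kind of synthetic data), fitting the same line with NumPy's least squares gives the values the gradient-descent loop should be approaching:

# Sanity check: least-squares fit on the same synthetic data (hypothetical helper snippet)
import numpy as np

train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10

# np.polyfit with degree 1 returns [slope, intercept] of the best-fit line
w_ls, b_ls = np.polyfit(train_X, train_Y, 1)
print('least-squares fit: w = {:.3f}, b = {:.3f} (preset: w = 2, b = 10)'.format(w_ls, b_ls))

If the TensorFlow loop is working, its final w and b should land near these least-squares values.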