线性回归原理
线性方程:y = kx + b(k 为斜率/权重,b 为截距/偏置)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow info/warning log spam
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt

# Load the education/income dataset and eyeball the raw relationship
# before fitting anything.
data = pd.read_csv('A.csv')
print(data)
plt.scatter(data['Education'], data['Income'])
plt.show()
预测目标与损失函数
目标:使预测函数 f(x) 的输出与真实值之间的整体误差最小。
损失函数:使用均方误差作为成本函数,也就是预测值和真实值之间差的平方取均值。
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow info/warning log spam
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset; feature is years of education, target is income.
data = pd.read_csv('A.csv')
x = data.Education
y = data.Income

# A single Dense(1) unit on a scalar input is exactly the linear model
# f(x) = a*x + b; Keras will learn a (kernel) and b (bias).
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.summary()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow info/warning log spam
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset; feature is years of education, target is income.
data = pd.read_csv('A.csv')
x = data.Education
y = data.Income

# Single Dense(1) layer on a scalar input: the linear model f(x) = a*x + b.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(1, input_shape=(1,)))

# Optimizer: Adam; loss: mean squared error — the standard cost function
# for linear regression (mean of squared prediction errors).
model.compile(optimizer='adam', loss='mse')

# Train for 5000 epochs so gradient descent can converge on a and b.
history = model.fit(x, y, epochs=5000)

# FIX: printing `history` only shows the History object's repr
# (e.g. "<keras.callbacks.History object at 0x...>"). Report the final
# training loss from history.history, which is the useful quantity.
print('final mse loss:', history.history['loss'][-1])
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow info/warning log spam
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset; feature is years of education, target is income.
data = pd.read_csv('A.csv')
x = data.Education
y = data.Income

# Single Dense(1) layer on a scalar input: the linear model f(x) = a*x + b.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(1, input_shape=(1,)))

# Optimizer: Adam; loss: mean squared error (cost function for linear regression).
model.compile(optimizer='adam', loss='mse')

# Train for 5000 epochs so gradient descent can converge on a and b.
history = model.fit(x, y, epochs=5000)

# FIX: printing `history` only shows the History object's repr; report
# the final training loss from history.history instead.
print('final mse loss:', history.history['loss'][-1])

print(model.predict(x))  # predicted income for every observed education value
# Predict the income for 20 years of education (user-facing message kept as-is).
print("20年的预测收入为:", model.predict(pd.Series([20])))