This article walks through a TensorFlow example of nonlinear regression. We hope it offers a useful reference for developers facing this kind of programming problem; follow along with us to learn how it works.
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Characteristic data of an NTC thermistor with R25=10k, B25/50=3470
# Load the sample data; format: resistance (kΩ)  temperature (℃)
dat = np.loadtxt('trainData.txt')

# Normalize the data to [0, 1]
R = dat[:, 0]
T = dat[:, 1]
R_K = R.max() - R.min()
R_B = R.min()
T_K = T.max() - T.min()
T_B = T.min()
R = (R - R_B) / R_K
T = (T - T_B) / T_K

# 141 samples, one feature each
R = R.reshape(141, 1)
T = T.reshape(141, 1)

X = tf.placeholder(tf.float32, shape=[None, 1])
Y = tf.placeholder(tf.float32, shape=[None, 1])

# Define a fully connected layer
def add_layer(inputs, in_size, out_size, activation_fun):
    """
    :param inputs: input data
    :param in_size: number of input columns
    :param out_size: number of output columns
    :param activation_fun: activation function
    :return: output matrix
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    bias = tf.Variable(tf.zeros([1, out_size]))
    z_i = tf.matmul(inputs, weights) + bias
    return activation_fun(z_i)

# Forward propagation
# Add a hidden layer with 10 neurons
out_h = add_layer(X, 1, 10, tf.nn.sigmoid)
# Output layer
out = add_layer(out_h, 10, 1, tf.nn.sigmoid)

# Define the loss function (mean squared error)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(out - Y), reduction_indices=[1]))
# Learning rate
learning_rate = 20.2
# Gradient descent optimizer that minimizes the loss
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Initialize the TensorFlow variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(10000):
        sess.run(train_step, feed_dict={X: R, Y: T})
        if i % 100 == 0:
            # Print the loss value
            e = sess.run(loss, feed_dict={X: R, Y: T})
            print(e)
    # Predicted values
    y = sess.run(out, feed_dict={X: R, Y: T})

# Map the predictions back to the original temperature scale
y = y * T_K + T_B
dat_R = dat[:, 0]
dat_T = dat[:, 1]
plt.rcParams['font.sans-serif'] = ['SimHei']   # font settings kept from the original (the article used Chinese labels)
plt.rcParams['axes.unicode_minus'] = False
plt.xlabel("Resistance (kΩ)")
plt.ylabel("Temperature (℃)")
plt.plot(dat_R, dat_T, 'r', label='Measured R-T curve')
plt.plot(dat_R, y, label='Fitted curve')
plt.legend()
plt.grid()
plt.show()
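Note that the script above is TensorFlow 1.x graph code (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer). On TensorFlow 2.x those symbols are no longer in the top-level namespace; a minimal way to run the script unchanged is the official v1 compatibility layer, i.e. replace the first import with:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore graph mode, placeholders, and sessions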
Running result: the script prints the loss every 100 training steps, then draws the measured R-T characteristic curve and the fitted curve in one figure.
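The article does not ship trainData.txt. If you want to reproduce the run, here is a hypothetical way to synthesize a compatible file from the standard B-parameter model of the NTC thermistor mentioned in the code comment (R25 = 10 kΩ, B25/50 = 3470); the 141-point temperature grid and its range are assumptions chosen to match the reshape(141, 1) in the script, not values taken from the original data:

import numpy as np

R25 = 10.0    # resistance at 25 ℃, in kΩ
B = 3470.0    # B25/50 constant
T25 = 298.15  # 25 ℃ in kelvin

# B-parameter model: R(T) = R25 * exp(B * (1/T - 1/T25)), T in kelvin
temps_c = np.linspace(-40.0, 100.0, 141)   # assumed measurement range
temps_k = temps_c + 273.15
resistances = R25 * np.exp(B * (1.0 / temps_k - 1.0 / T25))

# Column format expected by the script: resistance (kΩ)  temperature (℃)
np.savetxt('trainData.txt', np.column_stack([resistances, temps_c]), fmt='%.6f')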
That concludes this article on implementing nonlinear regression with TensorFlow. We hope it proves helpful to fellow programmers!