This article presents gradient descent code for univariate linear regression, in the hope that it offers a useful reference for developers working on similar problems. If that is what you need, follow along below!
import math

# Cost function
def compute_cost(x, y, w, b):
    m = x.shape[0]
    cost = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost = cost + (f_wb - y[i]) ** 2
    total_cost = 1 / (2 * m) * cost
    return total_cost
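In equation form, compute_cost evaluates the standard squared-error cost for the linear model w*x + b over the m training examples:

J(w, b) = \frac{1}{2m} \sum_{i=0}^{m-1} \left( w x^{(i)} + b - y^{(i)} \right)^2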
# Gradient computation function
def compute_gradient(x, y, w, b):
    m = x.shape[0]
    dj_dw = 0
    dj_db = 0
    for i in range(m):
        f_wb = w * x[i] + b
        dj_dw_i = (f_wb - y[i]) * x[i]
        dj_db_i = f_wb - y[i]
        dj_db += dj_db_i
        dj_dw += dj_dw_i
    dj_dw = dj_dw / m
    dj_db = dj_db / m
    return dj_dw, dj_db
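The function above accumulates, over the m examples, the partial derivatives of that cost with respect to w and b:

\frac{\partial J}{\partial w} = \frac{1}{m} \sum_{i=0}^{m-1} \left( w x^{(i)} + b - y^{(i)} \right) x^{(i)}, \qquad
\frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=0}^{m-1} \left( w x^{(i)} + b - y^{(i)} \right)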
# Gradient descent function
def gradient_descent(x, y, w_in, b_in, alpha, num_iters, cost_function, gradient_function):
    '''
    x: input vector, numpy.ndarray
    y: output vector, numpy.ndarray
    w_in: initial w
    b_in: initial b
    alpha: learning rate
    num_iters: number of iterations
    cost_function: cost function
    gradient_function: gradient computation function
    '''
    J_history = []  # cost recorded at every iteration
    p_history = []  # every (w, b) visited during training
    b = b_in
    w = w_in
    for i in range(num_iters):
        # Compute the partial derivatives and update the parameters w, b
        dj_dw, dj_db = gradient_function(x, y, w, b)
        b = b - alpha * dj_db
        w = w - alpha * dj_dw
        # Save the current cost J and parameters (w, b) -> useful for later visualization
        J_history.append(cost_function(x, y, w, b))
        p_history.append([w, b])
        # Print progress for ten of the iterations
        if i % math.ceil(num_iters / 10) == 0:
            print(f"Iteration {i}: Cost {J_history[-1]} ",
                  f"dj_dw: {dj_dw}, dj_db: {dj_db} ",
                  f"w: {w}, b: {b}")
    print(f'final w: {w}, b: {b}')
    # Compare target values with predicted values
    y_hat = w * x + b
    for i in range(x.shape[0]):
        print(f'target value: {y[i]}, predicted value: {y_hat[i]}, error: {y[i] - y_hat[i]}')
    return w, b, J_history, p_history
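To see the three functions working together, here is a minimal driver sketch. It assumes the functions above are already defined; the training data (x_train, y_train), learning rate, and iteration count are made-up values chosen only for illustration.

# Minimal usage sketch (illustrative values, not from the original article)
import numpy as np

# Synthetic training data: y is roughly 2*x + 1
x_train = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y_train = np.array([3.1, 4.9, 7.2, 9.0, 11.1])

w_final, b_final, J_history, p_history = gradient_descent(
    x_train, y_train,
    w_in=0, b_in=0,
    alpha=0.01, num_iters=10000,
    cost_function=compute_cost,
    gradient_function=compute_gradient)

print(f'learned parameters: w={w_final}, b={b_final}')

With settings like these, the printed cost should decrease steadily and the learned w and b should end up close to the slope and intercept underlying the synthetic data.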
That concludes this article on gradient descent code for univariate linear regression; we hope it proves helpful to programmers.