本文主要是介绍王权富贵:通过BP的反向传输查看神经网络最匹配的特征图--《Python神经网络编程》的学习笔记,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
这是使用BP反向还原机器认为最匹配的图案
(使用MNIST手写体数据库:https://download.csdn.net/download/a1103688841/10867644)
比如:0
下面开始代码介绍:
这里的类中最后一个函数backquery()是重点。这里有个问题:因为使用S函数和S反函数,所以值的范围不一样,需要校准。
import numpy
import scipy.special
import matplotlib.pyplot
class neuralNetwork:
    """Three-layer (input/hidden/output) feed-forward network trained with
    backpropagation.

    ``backquery`` runs the trained network in reverse to reconstruct the
    input pattern the network associates most strongly with a given output
    label (the "ideal" image for that label).
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Store layer sizes and learning rate, and randomly initialise weights.

        Weights are drawn from N(0, 1/sqrt(fan_in)) so that early layer
        sums land in the sigmoid's sensitive region.
        """
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # wih maps input -> hidden, who maps hidden -> output
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5),
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                       (self.onodes, self.hnodes))
        self.lr = learningrate
        # The activation function must not be changed casually: the gradient
        # terms in train() (out * (1 - out)) hard-code the logistic sigmoid's
        # derivative, and backquery() relies on logit being its exact inverse.
        self.activation_function = lambda x: scipy.special.expit(x)
        self.inverse_activation_function = lambda x: scipy.special.logit(x)

    def train(self, inputs_list, targets_list):
        """Perform one gradient-descent step on a single (input, target) pair."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass.
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # Backpropagate: split the output error across hidden nodes by weight.
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Weight updates use the sigmoid derivative out * (1 - out).
        self.who += self.lr * numpy.dot(
            (output_errors * final_outputs * (1.0 - final_outputs)),
            numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot(
            (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
            numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward pass: return the output-layer activations as a column vector."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs

    def backquery(self, targets_list):
        """Run the network backwards from an output target to an input image.

        Key point: logit() (the sigmoid inverse) is only finite on the open
        interval (0, 1), while the back-propagated dot products are
        unbounded, so each backward signal must be rescaled into
        [0.01, 0.99] before applying the inverse activation.
        """
        final_outputs = numpy.array(targets_list, ndmin=2).T
        final_inputs = self.inverse_activation_function(final_outputs)
        hidden_outputs = numpy.dot(self.who.T, final_inputs)
        # Rescale into [0.01, 0.99] so logit stays finite.
        hidden_outputs -= numpy.min(hidden_outputs)
        hidden_outputs /= numpy.max(hidden_outputs)
        hidden_outputs *= 0.98
        hidden_outputs += 0.01
        hidden_inputs = self.inverse_activation_function(hidden_outputs)
        inputs = numpy.dot(self.wih.T, hidden_inputs)
        # Same calibration for the reconstructed input signal.
        inputs -= numpy.min(inputs)
        inputs /= numpy.max(inputs)
        inputs *= 0.98
        inputs += 0.01
        return inputs
这里是开始训练,得到需要的权重。
# Network hyper-parameters: 28x28 = 784 pixels in, 10 digit classes out.
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# Load the MNIST training set; each CSV record is: label, 784 pixel values.
with open("mnist_train.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()

epochs = 5
for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        # Scale pixels from 0..255 into 0.01..1.00 (avoids zero inputs,
        # which would kill the corresponding weight updates).
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # Targets are 0.01 everywhere except 0.99 at the correct label
        # (sigmoid outputs can never reach exactly 0 or 1).
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)

# Evaluate on the held-out test set.
with open("mnist_test.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()

scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    # The predicted label is the index of the strongest output node.
    label = numpy.argmax(outputs)
    scorecard.append(1 if label == correct_label else 0)

scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)
下面正式调用反向推理函数。
# Reconstruct the input image the trained network associates with this label.
label = 1
# create the output signals for this label (0.99 at the label, 0.01 elsewhere)
targets = numpy.zeros(output_nodes) + 0.01
targets[label] = 0.99
print(targets)
# get image data by running the network backwards
image_data = n.backquery(targets)
# plot the 28x28 reconstruction
matplotlib.pyplot.imshow(image_data.reshape(28, 28), cmap='Greys', interpolation='None')
下面是还原图片。
这篇关于王权富贵:通过BP的反向传输查看神经网络最匹配的特征图--《Python神经网络编程》的学习笔记的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!