This article, [DL--22], shows how to implement a simple NeuralNetwork class from scratch and then use it for handwritten digit recognition; the full code and the run results are given below for reference.
1. NeuralNetwork.py
# coding:utf-8
import numpy as np


# The hyperbolic tangent and logistic activation functions and their derivatives
def tanh(x):
    return np.tanh(x)

def tanh_deriv(x):
    return 1.0 - np.tanh(x) ** 2

def logistic(x):
    return 1 / (1 + np.exp(-x))

def logistic_derivative(x):
    return logistic(x) * (1 - logistic(x))


# NeuralNetwork: a simple fully connected feed-forward network trained with backpropagation
class NeuralNetwork:
    def __init__(self, layers, activation='tanh'):
        """
        :param layers: A list containing the number of units in each layer.
            Should contain at least two values, e.g. [10, 10, 3] means 10 units
            in the first layer, 10 in the second and 3 in the third.
        :param activation: The activation function to be used. Can be
            "logistic" or "tanh".
        """
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv

        self.weights = []
        # Start the loop at 1, i.e. initialize the weights with the second layer as the reference layer
        for i in range(1, len(layers) - 1):
            # weights connecting the previous layer to the current one (the +1 is the bias unit)
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            # weights connecting the current layer to the next one
            self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)

    # X: matrix with one training instance per row; y: the label of each instance;
    # learning_rate: step size for the weight updates; epochs: number of randomly sampled updates
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)                          # make sure X is at least 2-D
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X                             # add the bias unit to the input layer
        X = temp
        y = np.array(y)                               # convert the labels to an array

        for k in range(epochs):
            # pick one random instance and update the network with it
            i = np.random.randint(X.shape[0])
            a = [X[i]]

            # forward pass through all layers
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            # backward pass: compute the output error and propagate it back
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]
            for l in range(len(a) - 2, 0, -1):        # begin at the second-to-last layer
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()

            # update the weights layer by layer
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)

    def predict(self, x):
        x = np.array(x)
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x                                # append the bias unit
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
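Before moving to the digits data, the class can be sanity-checked on a toy problem. The snippet below is only a minimal usage sketch; the XOR data and the [2, 4, 1] layer sizes are illustrative choices, not part of the original article:

import numpy as np
from NeuralNetwork import NeuralNetwork

# XOR truth table: not linearly separable, so the hidden layer is required
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])

nn = NeuralNetwork([2, 4, 1], 'tanh')  # 2 inputs, 4 hidden units, 1 output
nn.fit(X, y, epochs=10000)
for sample in X:
    print(sample, nn.predict(sample))  # outputs should drift toward 0 / 1 as training succeeds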
2. Handwritten digit recognition based on NeuralNetwork
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in old sklearn versions

from NeuralNetwork import NeuralNetwork

digits = load_digits()
X = digits.data
y = digits.target
X -= X.min()  # normalize the values to bring them into the range 0-1
X /= X.max()

############################### train the model ########################
nn = NeuralNetwork([64, 100, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
print("start fitting")
nn.fit(X_train, labels_train, epochs=3000)

############################### predict ################################
predictions = []
for i in range(X_test.shape[0]):
    o = nn.predict(X_test[i])
    predictions.append(np.argmax(o))  # the output unit with the largest activation is the predicted digit

############################### confusion matrix #######################
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))

############################### print raw predictions ##################
# for each in predictions:
#     print(each)
# for each in y_test:
#     print(each)
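If a single overall accuracy number is wanted next to the confusion matrix, sklearn's accuracy_score can be computed on the same predictions; this is an optional addition, not part of the original script:

from sklearn.metrics import accuracy_score

print("accuracy:", accuracy_score(y_test, predictions))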
3. Run results:
start fitting
[[44  0  0  0  0  0  0  0  0  0]
 [ 0 44  0  0  0  1  0  0  2  0]
 [ 0  1 39  0  0  0  0  0  0  0]
 [ 0  1  0 49  0  0  0  2  2  0]
 [ 0  2  0  0 34  0  0  2  1  0]
 [ 0  2  0  0  1 44  1  0  0  3]
 [ 1  2  0  0  0  0 43  0  0  0]
 [ 0  0  0  0  0  0  0 41  0  0]
 [ 0  4  0  0  0  1  0  1 31  2]
 [ 0  4  0  0  0  0  0  1  1 43]]
             precision    recall  f1-score   support

          0       0.98      1.00      0.99        44
          1       0.73      0.94      0.82        47
          2       1.00      0.97      0.99        40
          3       1.00      0.91      0.95        54
          4       0.97      0.87      0.92        39
          5       0.96      0.86      0.91        51
          6       0.98      0.93      0.96        46
          7       0.87      1.00      0.93        41
          8       0.84      0.79      0.82        39
          9       0.90      0.88      0.89        49

avg / total       0.92      0.92      0.92       450

Process finished with exit code 0
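Row i of the confusion matrix counts how the test samples whose true digit is i were classified, so the diagonal holds the correct predictions. The diagonal sums to 412 of the 450 test samples, i.e. roughly 92% accuracy, which matches the averaged precision/recall/f1 of 0.92 in the report.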
That concludes this [DL--22] article on implementing the NeuralNetwork algorithm and handwritten digit recognition; hopefully it is a useful reference.