Two points are worth emphasizing:

- The result in (2) contains one more summation than the result in (1). This is because, when computing the gradients of the parameters between the input layer and the hidden layer, every node in the output layer contributes, via the hidden layer, to the gradient.
- The parameter-update result in (2) can reuse the parameter-update result already computed in (1). To some extent this matches the very name of the algorithm, "backpropagation", which is rather striking. The standard matrix form of this recurrence is sketched right after this list.
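For reference, the same idea written in the conventional vectorized notation (the symbols here are the standard ones and may not exactly match those used in (1) and (2) above): let W^l, b^l, z^l and a^l be the weights, biases, weighted inputs and activations of layer l, let L be the output layer, and take the quadratic cost C = \frac{1}{2}\|a^L - y\|^2. Then

\delta^L = (a^L - y) \odot \sigma'(z^L)
\delta^l = \big( (W^{l+1})^\top \delta^{l+1} \big) \odot \sigma'(z^l)
\frac{\partial C}{\partial W^l} = \delta^l \, (a^{l-1})^\top, \qquad \frac{\partial C}{\partial b^l} = \delta^l

The matrix product (W^{l+1})^\top \delta^{l+1} carries out exactly the extra summation over the next layer's nodes mentioned in the first point, and reusing \delta^{l+1} to compute \delta^l is the reuse mentioned in the second point; the backward loop of backprop in the code below implements precisely this recurrence.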
4. Complete Code Implementation
# -*- coding: utf-8 -*-
import random
import numpy as np


class Network(object):

    def __init__(self, sizes):
        """sizes gives the number of neurons in each layer, e.g. [2, 3, 1]
        means 2 neurons in the first layer, 3 in the second and 1 in the third."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Forward propagation."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        """Stochastic gradient descent."""
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for j in xrange(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in xrange(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print "Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test)
            else:
                print "Epoch {0} complete".format(j)

    def update_mini_batch(self, mini_batch, eta):
        """Update the parameters with backpropagation. mini_batch is a list of
        (x, y) tuples and eta is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple (nabla_b, nabla_w) representing the gradient of the
        cost function."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # forward pass
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # l = 1 denotes the last layer of neurons, l = 2 the second-to-last, and so on.
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of correctly classified examples."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        return (output_activations - y)


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))
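As a quick sanity check (not part of the original article), the analytic gradients returned by backprop can be compared against numerical finite differences of the quadratic cost the code implicitly uses (cost_derivative returns a - y, i.e. the derivative of 0.5 * ||a - y||^2). A minimal sketch, assuming the class above is saved as network.py:

import numpy as np
from network import Network

np.random.seed(1)
net = Network([2, 3, 1])
x = np.random.randn(2, 1)
y = np.array([[1.0]])

# analytic gradient from backprop
nabla_b, nabla_w = net.backprop(x, y)

def cost(net, x, y):
    # quadratic cost 0.5 * ||a - y||^2, matching cost_derivative = (a - y)
    return 0.5 * np.sum((net.feedforward(x) - y) ** 2)

# numerical gradient for one weight via central differences
eps = 1e-5
i, j = 0, 0
w = net.weights[0]
old = w[i, j]
w[i, j] = old + eps
c_plus = cost(net, x, y)
w[i, j] = old - eps
c_minus = cost(net, x, y)
w[i, j] = old
numeric = (c_plus - c_minus) / (2 * eps)
print("analytic %.8f vs numeric %.8f" % (nabla_w[0][i, j], numeric))

The two numbers should agree to several decimal places; a large discrepancy would indicate a bug in the backward pass.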
5. Simple Application

# -*- coding: utf-8 -*-
from network import *


def vectorized_result(j, nclass):
    """One-hot encode a discrete label."""
    e = np.zeros((nclass, 1))
    e[j] = 1.0
    return e


def get_format_data(X, y, isTest):
    ndim = X.shape[1]
    nclass = len(np.unique(y))
    inputs = [np.reshape(x, (ndim, 1)) for x in X]
    if not isTest:
        results = [vectorized_result(label, nclass) for label in y]
    else:
        results = y
    data = zip(inputs, results)
    return data


# generate random data
from sklearn.datasets import *
np.random.seed(0)
X, y = make_moons(200, noise=0.20)
ndim = X.shape[1]
nclass = len(np.unique(y))

# split into training and test sets
from sklearn.cross_validation import train_test_split
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=0)

training_data = get_format_data(train_x, train_y, False)
test_data = get_format_data(test_x, test_y, True)

net = Network(sizes=[ndim, 10, nclass])
net.SGD(training_data=training_data, epochs=5, mini_batch_size=10, eta=0.1, test_data=test_data)
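The listing above targets Python 2 and an old scikit-learn release. As an aside (an assumption about the reader's environment, not part of the original article), running it on Python 3 with scikit-learn >= 0.18 would need roughly these changes: xrange becomes range and the print statements become print() calls in network.py, train_test_split now lives in sklearn.model_selection, and the result of zip() must be materialized into a list, since Python 3 returns an iterator while SGD shuffles the data and calls len() on it. A sketch of the adjusted data preparation, reusing vectorized_result from above:

import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split  # replaces sklearn.cross_validation

def get_format_data(X, y, is_test):
    ndim = X.shape[1]
    nclass = len(np.unique(y))
    inputs = [np.reshape(x, (ndim, 1)) for x in X]
    results = y if is_test else [vectorized_result(label, nclass) for label in y]
    # zip() is lazy in Python 3; SGD needs a real list for shuffling and len()
    return list(zip(inputs, results))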
References

(1) Zhou Zhihua, Machine Learning (機器學習)
(2) https://github.com/mnielsen/neural-networks-and-deep-learning
(3) https://zhuanlan.zhihu.com/p/21525237
Copyright notice:
If reprinting, please credit the source: 雪倫
Original link: https://blog.csdn.net/a819825294/article/details/53393837