numpy-backward-prop.py

#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: 7.9-backward-prop.py
@time: 2020/2/24 17:32
@desc: Code for Section 7.9, "Backpropagation in Practice"
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split

plt.rcParams['font.size'] = 16
plt.rcParams['font.family'] = ['STKaiti']
plt.rcParams['axes.unicode_minus'] = False


def load_dataset():
    # Number of sample points
    N_SAMPLES = 2000
    # Fraction of the data held out for testing
    TEST_SIZE = 0.3
    # Generate the dataset directly with scikit-learn's utility function
    X, y = make_moons(n_samples=N_SAMPLES, noise=0.2, random_state=100)
    # Split the 2000 points into training and test sets at a 7:3 ratio
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=42)
    return X, y, X_train, X_test, y_train, y_test
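
# Shape note: make_moons returns coordinates X of shape (2000, 2) and binary
# labels y of shape (2000,) with values in {0, 1}; after the 7:3 split,
# X_train is (1400, 2) and X_test is (600, 2).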


def make_plot(X, y, plot_name, XX=None, YY=None, preds=None, dark=False):
    # Plot the dataset's distribution; X holds the 2D coordinates, y the labels
    if dark:
        plt.style.use('dark_background')
    else:
        sns.set_style("whitegrid")
    plt.figure(figsize=(16, 12))
    axes = plt.gca()
    axes.set(xlabel="$x_1$", ylabel="$x_2$")
    plt.title(plot_name, fontsize=30)
    plt.subplots_adjust(left=0.20)
    plt.subplots_adjust(right=0.80)
    if XX is not None and YY is not None and preds is not None:
        plt.contourf(XX, YY, preds.reshape(XX.shape), 25, alpha=1, cmap=plt.cm.Spectral)
        plt.contour(XX, YY, preds.reshape(XX.shape), levels=[.5], cmap="Greys", vmin=0, vmax=.6)
    # Scatter plot, colored by label
    plt.scatter(X[:, 0], X[:, 1], c=y.ravel(), s=40, cmap=plt.cm.Spectral, edgecolors='none')
    plt.savefig('数据集分布.svg')
    plt.close()
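
# Note: XX and YY are presumably coordinate matrices from np.meshgrid and preds
# the network outputs evaluated on that grid, which the contour calls turn into
# a decision boundary; this script's main() only uses the scatter-plot path.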


class Layer:
    # A fully-connected network layer
    def __init__(self, n_input, n_neurons, activation=None, weights=None,
                 bias=None):
        """
        :param int n_input: number of input nodes
        :param int n_neurons: number of output nodes
        :param str activation: type of activation function
        :param weights: weight tensor; generated inside the class by default
        :param bias: bias vector; generated inside the class by default
        """
        # Initialize the weights from a normal distribution. Initialization
        # matters a great deal: a poor choice can keep the network from converging.
        self.weights = weights if weights is not None else np.random.randn(n_input, n_neurons) * np.sqrt(1 / n_neurons)
        self.bias = bias if bias is not None else np.random.rand(n_neurons) * 0.1
        self.activation = activation  # activation type, e.g. 'sigmoid'
        self.last_activation = None  # output o of the activation function
        self.error = None  # intermediate variable used to compute this layer's delta
        self.delta = None  # this layer's delta, used to compute the gradient
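
    # Shape note: self.weights has shape (n_input, n_neurons) and self.bias has
    # shape (n_neurons,). The 1/sqrt(n_neurons) factor shrinks the initial
    # weights as the layer widens, a variance-scaling heuristic in the spirit
    # of Xavier initialization.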

    # The layer's forward pass; last_activation stores the layer's output value.
    def activate(self, x):
        # Forward-propagation function
        r = np.dot(x, self.weights) + self.bias  # X@W+b
        # Pass through the activation function to get the layer output o
        self.last_activation = self._apply_activation(r)
        return self.last_activation
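
    # Shape note: for a single sample x of shape (n_input,), r = x @ W + b has
    # shape (n_neurons,); a batch of shape (m, n_input) broadcasts through the
    # same code to give (m, n_neurons).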

    # self._apply_activation implements the forward computation for several
    # activation types, even though only Sigmoid is used in this example.
    def _apply_activation(self, r):
        # Compute the activation function's output
        if self.activation is None:
            return r  # no activation: return the input unchanged
        # ReLU activation
        elif self.activation == 'relu':
            return np.maximum(r, 0)
        # tanh activation
        elif self.activation == 'tanh':
            return np.tanh(r)
        # sigmoid activation
        elif self.activation == 'sigmoid':
            return 1 / (1 + np.exp(-r))
        return r
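
    # Formula note: element-wise, the three activations above compute
    #   relu(r)    = max(r, 0)
    #   tanh(r)    = (e^r - e^(-r)) / (e^r + e^(-r))
    #   sigmoid(r) = 1 / (1 + e^(-r))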

    # The derivatives of the different activation functions:
    def apply_activation_derivative(self, r):
        # Compute the derivative of the activation function
        # No activation: the derivative is 1
        if self.activation is None:
            return np.ones_like(r)
        # Derivative of ReLU
        elif self.activation == 'relu':
            grad = np.array(r, copy=True)
            grad[r > 0] = 1.
            grad[r <= 0] = 0.
            return grad
        # Derivative of tanh
        elif self.activation == 'tanh':
            return 1 - r ** 2
        # Derivative of Sigmoid
        elif self.activation == 'sigmoid':
            return r * (1 - r)
        return r
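
    # Derivative note: the argument r here is the activation's *output* o, not
    # its pre-activation input. That is why sigmoid's derivative is written as
    # o * (1 - o) and tanh's as 1 - o**2; for ReLU the test r > 0 is equivalent
    # on the output side, since relu(x) > 0 exactly when x > 0.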


# The neural-network model
class NeuralNetwork:
    def __init__(self):
        self._layers = []  # list of the network's layer objects

    def add_layer(self, layer):
        # Append a network layer
        self._layers.append(layer)

    # The network's forward pass just loops over the layers and calls each
    # layer object's forward computation in turn:
    def feed_forward(self, X):
        # Forward propagation
        for layer in self._layers:
            # Pass through each layer in sequence
            X = layer.activate(X)
        return X
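
    # The forward pass is therefore the composition
    #   o = sigma(... sigma(sigma(X @ W1 + b1) @ W2 + b2) ... @ WL + bL),
    # with each layer caching its output in last_activation for the
    # backpropagation step below.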

    def backpropagation(self, X, y, learning_rate):
        # Backpropagation algorithm
        # Forward pass to get the output value
        output = self.feed_forward(X)
        for i in reversed(range(len(self._layers))):  # loop backwards
            layer = self._layers[i]  # current layer object
            if layer == self._layers[-1]:  # output layer
                layer.error = y - output  # error term of the MSE loss for the 2-class task
                # Key step: compute the last layer's delta; see the
                # output-layer gradient formula
                layer.delta = layer.error * layer.apply_activation_derivative(output)
            else:  # hidden layer
                next_layer = self._layers[i + 1]  # the next layer object
                layer.error = np.dot(next_layer.weights, next_layer.delta)
                # Key step: compute the hidden layer's delta; see the
                # hidden-layer gradient formula
                layer.delta = layer.error * layer.apply_activation_derivative(layer.last_activation)
        # Loop over the layers and update the parameters
        for i in range(len(self._layers)):
            layer = self._layers[i]
            # o_i is the previous layer's output
            o_i = np.atleast_2d(X if i == 0 else self._layers[i - 1].last_activation)
            # Gradient descent: delta already carries the minus sign of the
            # gradient, hence the plus here
            layer.weights += layer.delta * o_i.T * learning_rate
            # Update the bias as well; its gradient is just delta
            layer.bias += layer.delta * learning_rate
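
    # Equation note: writing eta for the learning rate and * for the
    # element-wise product, the loops above implement
    #   output layer:  delta_K = (y - o_K) * sigma'(o_K)
    #   hidden layer:  delta_k = (W_{k+1} @ delta_{k+1}) * sigma'(o_k)
    #   updates:       W_k <- W_k + eta * o_{k-1}^T delta_k,  b_k <- b_k + eta * delta_k
    # delta already absorbs the minus sign from the gradient of (y - o)^2,
    # which is why both updates use +.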

    def train(self, X_train, X_test, y_train, y_test, learning_rate, max_epochs):
        # Network training function
        # One-hot encoding
        y_onehot = np.zeros((y_train.shape[0], 2))
        y_onehot[np.arange(y_train.shape[0]), y_train] = 1
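        # Example: y_train = [0, 1] becomes [[1., 0.], [0., 1.]] -- one row per
        # sample, with a 1 in the column of the true class.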
        # Compute the MSE between the one-hot labels and the network output,
        # call backpropagation to update the parameters, and iterate over the
        # training set for max_epochs passes.
        mses = []
        accuracys = []
        for i in range(max_epochs + 1):  # train for max_epochs epochs
            for j in range(len(X_train)):  # train on one sample at a time
                self.backpropagation(X_train[j], y_onehot[j], learning_rate)
            if i % 10 == 0:
                # Record and print the MSE loss
                mse = np.mean(np.square(y_onehot - self.feed_forward(X_train)))
                mses.append(mse)
                accuracy = self.accuracy(self.predict(X_test), y_test.flatten())
                accuracys.append(accuracy)
                print('Epoch: #%s, MSE: %f' % (i, float(mse)))
                # Compute and print the accuracy
                print('Accuracy: %.2f%%' % (accuracy * 100))
        return mses, accuracys

    def predict(self, X):
        return self.feed_forward(X)

    def accuracy(self, X, y):
        return np.sum(np.equal(np.argmax(X, axis=1), y)) / y.shape[0]
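
    # Usage sketch (assuming a trained instance `nn`; the names are
    # illustrative, not part of the class):
    #   probs = nn.predict(X_test)        # raw outputs, shape (n_test, 2)
    #   acc = nn.accuracy(probs, y_test)  # fraction of correct argmax labels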


def main():
    X, y, X_train, X_test, y_train, y_test = load_dataset()
    # Call make_plot to draw the data distribution; X holds the 2D
    # coordinates, y the labels
    make_plot(X, y, "Classification Dataset Visualization")
    plt.show()
    nn = NeuralNetwork()  # instantiate the network class
    nn.add_layer(Layer(2, 25, 'sigmoid'))  # hidden layer 1, 2 => 25
    nn.add_layer(Layer(25, 50, 'sigmoid'))  # hidden layer 2, 25 => 50
    nn.add_layer(Layer(50, 25, 'sigmoid'))  # hidden layer 3, 50 => 25
    nn.add_layer(Layer(25, 2, 'sigmoid'))  # output layer, 25 => 2
    mses, accuracys = nn.train(X_train, X_test, y_train, y_test, 0.01, 1000)
    x = list(range(0, 101, 10))
    # Plot the MSE curve (first 11 recorded points, i.e. epochs 0-100)
    plt.title("MSE Loss")
    plt.plot(x, mses[:11], color='blue')
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.savefig('训练误差曲线.svg')
    plt.close()
    # Plot the accuracy curve
    plt.title("Accuracy")
    plt.plot(x, accuracys[:11], color='blue')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.savefig('网络测试准确率.svg')
    plt.close()


if __name__ == '__main__':
    main()