Original: TensorFlow Tutorials

Translator: 飞龙

License: CC BY-NC-SA 4.0

1.1 TensorFlow Basic Operations

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. import tensorflow as tf
  2. # 基本的常量操作
  3. # 由构造器返回的值
  4. # 表示常量操作的输出
  5. a = tf.constant(2)
  6. b = tf.constant(3)
  7. # 加载默认图
  8. with tf.Session() as sess:
  9. print "a=2, b=3"
  10. print "Addition with constants: %i" % sess.run(a+b)
  11. print "Multiplication with constants: %i" % sess.run(a*b)
  12. '''
  13. a=2, b=3
  14. Addition with constants: 5
  15. Multiplication with constants: 6
  16. '''
  17. # 作为图输入的变量的基本操作
  18. # The value returned by the constructor represents
  19. # the output of the variable op (defined as input when running the session)
  20. # TF 图输入
  21. a = tf.placeholder(tf.int16)
  22. b = tf.placeholder(tf.int16)
  23. # 定义一些操作
  24. add = tf.add(a, b)
  25. mul = tf.mul(a, b)
  26. # 加载默认图
  27. with tf.Session() as sess:
  28. # Run every operation with variable input
  29. print "Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})
  30. print "Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})
  31. '''
  32. Addition with variables: 5
  33. Multiplication with variables: 6
  34. '''
  35. # ----------------
  36. # 更多细节:
  37. # 来自 TF 官方教程的矩阵乘法教程
  38. # 创建常量操作,产生 1x2 矩阵
  39. # 操作作为节点添加到默认图中
  40. #
  41. # 由构造器返回的值
  42. # 表示常量操作的输出
  43. matrix1 = tf.constant([[3., 3.]])
  44. # 创建另一个常量,产生 2x1 矩阵
  45. matrix2 = tf.constant([[2.],[2.]])
  46. # 创建 Matmul 操作,它接受 'matrix1' 和 'matrix2' 作为输入
  47. # 返回的值 'product' 表示矩阵乘法的结果
  48. product = tf.matmul(matrix1, matrix2)
  49. # To run the matmul op, we call the session's 'run()' method, passing 'product',
  50. # which represents the output of the matmul op. The call indicates that we want to fetch the matmul op's output back.
  51. #
  52. # 操作所需的所有输入都由会话自动运行
  53. # 它们通常是并行运行的
  54. #
  55. # 'run(product)' 的调用会执行图中的三个操作:
  56. # 两个常量和 matmul
  57. #
  58. # 操作的输出在 'result' 中返回,作为 NumPy `ndarray` 对象
  59. with tf.Session() as sess:
  60. result = sess.run(product)
  61. print result
  62. # [[ 12.]]
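The values above are easy to sanity-check by hand. The following NumPy sketch (not part of the original example; it assumes only NumPy is installed) computes the same results eagerly, without building a graph or starting a session:

    import numpy as np

    # The same constants as in the TF example above
    a, b = 2, 3
    print("Addition with constants: %i" % (a + b))        # 5
    print("Multiplication with constants: %i" % (a * b))  # 6

    # The same 1x2 by 2x1 matrix product as in the matmul example
    matrix1 = np.array([[3., 3.]])
    matrix2 = np.array([[2.], [2.]])
    print(np.matmul(matrix1, matrix2))                    # [[ 12.]]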

1.2 TensorFlow Linear Regression

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. import tensorflow as tf
  2. import numpy
  3. import matplotlib.pyplot as plt
  4. rng = numpy.random
  5. # 参数
  6. learning_rate = 0.01
  7. training_epochs = 2000
  8. display_step = 50
  9. # 训练数据
  10. train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
  11. train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
  12. n_samples = train_X.shape[0]
  13. # TF 图输入
  14. X = tf.placeholder("float")
  15. Y = tf.placeholder("float")
  16. # 创建模型
  17. # 设置模型权重
  18. W = tf.Variable(rng.randn(), name="weight")
  19. b = tf.Variable(rng.randn(), name="bias")
  20. # 构造线性模型
  21. activation = tf.add(tf.mul(X, W), b)
  22. # 最小化平方误差
  23. cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
  24. optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
  25. # 初始化变量
  26. init = tf.initialize_all_variables()
  27. # 加载图
  28. with tf.Session() as sess:
  29. sess.run(init)
  30. # 拟合所有训练数据
  31. for epoch in range(training_epochs):
  32. for (x, y) in zip(train_X, train_Y):
  33. sess.run(optimizer, feed_dict={X: x, Y: y})
  34. # 展示每一步的日志
  35. if epoch % display_step == 0:
  36. print "Epoch:", '%04d' % (epoch+1), "cost=", \
  37. "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
  38. "W=", sess.run(W), "b=", sess.run(b)
  39. print "Optimization Finished!"
  40. print "cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), \
  41. "W=", sess.run(W), "b=", sess.run(b)
  42. # 展示图
  43. plt.plot(train_X, train_Y, 'ro', label='Original data')
  44. plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
  45. plt.legend()
  46. plt.show()
  47. '''
  48. Epoch: 0001 cost= 3.389688730 W= 0.0198441 b= -0.273522
  49. Epoch: 0051 cost= 0.134034902 W= 0.383208 b= -0.159746
  50. Epoch: 0101 cost= 0.127440125 W= 0.375261 b= -0.102578
  51. Epoch: 0151 cost= 0.121607177 W= 0.367787 b= -0.0488099
  52. Epoch: 0201 cost= 0.116448022 W= 0.360758 b= 0.00175997
  53. Epoch: 0251 cost= 0.111884907 W= 0.354146 b= 0.0493223
  54. Epoch: 0301 cost= 0.107848980 W= 0.347928 b= 0.0940558
  55. Epoch: 0351 cost= 0.104279339 W= 0.34208 b= 0.136129
  56. Epoch: 0401 cost= 0.101122171 W= 0.336579 b= 0.1757
  57. Epoch: 0451 cost= 0.098329842 W= 0.331405 b= 0.212917
  58. Epoch: 0501 cost= 0.095860250 W= 0.32654 b= 0.247921
  59. Epoch: 0551 cost= 0.093676031 W= 0.321963 b= 0.280843
  60. Epoch: 0601 cost= 0.091744311 W= 0.317659 b= 0.311807
  61. Epoch: 0651 cost= 0.090035893 W= 0.313611 b= 0.340929
  62. Epoch: 0701 cost= 0.088524953 W= 0.309804 b= 0.36832
  63. Epoch: 0751 cost= 0.087188691 W= 0.306222 b= 0.394082
  64. Epoch: 0801 cost= 0.086007021 W= 0.302854 b= 0.418311
  65. Epoch: 0851 cost= 0.084961981 W= 0.299687 b= 0.441099
  66. Epoch: 0901 cost= 0.084037818 W= 0.296708 b= 0.462532
  67. Epoch: 0951 cost= 0.083220571 W= 0.293905 b= 0.48269
  68. Epoch: 1001 cost= 0.082497880 W= 0.29127 b= 0.50165
  69. Epoch: 1051 cost= 0.081858821 W= 0.288791 b= 0.519481
  70. Epoch: 1101 cost= 0.081293717 W= 0.28646 b= 0.536251
  71. Epoch: 1151 cost= 0.080794014 W= 0.284267 b= 0.552026
  72. Epoch: 1201 cost= 0.080352172 W= 0.282205 b= 0.566861
  73. Epoch: 1251 cost= 0.079961479 W= 0.280265 b= 0.580815
  74. Epoch: 1301 cost= 0.079616025 W= 0.278441 b= 0.593939
  75. Epoch: 1351 cost= 0.079310589 W= 0.276725 b= 0.606284
  76. Epoch: 1401 cost= 0.079040587 W= 0.275111 b= 0.617893
  77. Epoch: 1451 cost= 0.078801893 W= 0.273594 b= 0.62881
  78. Epoch: 1501 cost= 0.078590907 W= 0.272167 b= 0.639077
  79. Epoch: 1551 cost= 0.078404360 W= 0.270824 b= 0.648734
  80. Epoch: 1601 cost= 0.078239456 W= 0.269562 b= 0.657817
  81. Epoch: 1651 cost= 0.078093678 W= 0.268374 b= 0.66636
  82. Epoch: 1701 cost= 0.077964827 W= 0.267257 b= 0.674395
  83. Epoch: 1751 cost= 0.077850945 W= 0.266207 b= 0.681952
  84. Epoch: 1801 cost= 0.077750273 W= 0.265219 b= 0.68906
  85. Epoch: 1851 cost= 0.077661335 W= 0.264289 b= 0.695745
  86. Epoch: 1901 cost= 0.077582702 W= 0.263416 b= 0.702033
  87. Epoch: 1951 cost= 0.077513263 W= 0.262593 b= 0.707947
  88. Optimization Finished!
  89. cost= 0.077453 W= 0.261835 b= 0.713401
  90. '''
  91. from IPython.display import Image
  92. Image(filename='linearreg.png')

Figure 1: linearreg.png (original data and fitted line)
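GradientDescentOptimizer differentiates the L2 cost automatically, but the gradients are simple enough to write by hand. The sketch below (a NumPy-only illustration, not part of the original example) minimizes the same cost = Σ(Wx + b − y)² / (2n) on the same data, using full-batch updates instead of the per-sample updates above:

    import numpy as np

    train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                          2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
    train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53,
                          1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
    n_samples = train_X.shape[0]
    W, b = np.random.randn(), np.random.randn()
    learning_rate = 0.01

    for epoch in range(2000):
        pred = W * train_X + b
        # Gradients of cost = sum((pred - y)^2) / (2 * n_samples)
        dW = np.sum((pred - train_Y) * train_X) / n_samples
        db = np.sum(pred - train_Y) / n_samples
        W -= learning_rate * dW
        b -= learning_rate * db

    cost = np.sum((W * train_X + b - train_Y) ** 2) / (2 * n_samples)
    print("cost=", cost, "W=", W, "b=", b)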

1.3 TensorFlow Logistic Regression

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. # Import MNIST data
  2. import input_data
  3. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  4. '''
  5. Extracting /tmp/data/train-images-idx3-ubyte.gz
  6. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  7. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  8. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  9. '''
  10. import tensorflow as tf
  11. # 参数
  12. learning_rate = 0.01
  13. training_epochs = 25
  14. batch_size = 100
  15. display_step = 1
  16. # TF 图输入
  17. x = tf.placeholder("float", [None, 784]) # mnist 数据图像,形状为 28*28=784
  18. y = tf.placeholder("float", [None, 10]) # 0-9 数字识别 => 10 个类
  19. # 创建模型
  20. # 设置模型权重
  21. W = tf.Variable(tf.zeros([784, 10]))
  22. b = tf.Variable(tf.zeros([10]))
  23. # 构造模型
  24. activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
  25. # 最小化交叉熵误差
  26. # 交叉熵
  27. cost = -tf.reduce_sum(y*tf.log(activation))
  28. # 梯度下降
  29. optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
  30. # 初始化变量
  31. init = tf.initialize_all_variables()
  32. # 加载图
  33. with tf.Session() as sess:
  34. sess.run(init)
  35. # 训练循环
  36. for epoch in range(training_epochs):
  37. avg_cost = 0.
  38. total_batch = int(mnist.train.num_examples/batch_size)
  39. # 遍历所有批量
  40. for i in range(total_batch):
  41. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  42. # 使用批量数据拟合训练
  43. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
  44. # 计算平均损失
  45. avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
  46. # 展示每一步的日志
  47. if epoch % display_step == 0:
  48. print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
  49. print "Optimization Finished!"
  50. # 测试模型
  51. correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
  52. # 计算准确率
  53. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  54. print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
  55. '''
  56. Epoch: 0001 cost= 29.860479714
  57. Epoch: 0002 cost= 22.080549484
  58. Epoch: 0003 cost= 21.237104595
  59. Epoch: 0004 cost= 20.460196280
  60. Epoch: 0005 cost= 20.185128237
  61. Epoch: 0006 cost= 19.940297202
  62. Epoch: 0007 cost= 19.645111119
  63. Epoch: 0008 cost= 19.507218031
  64. Epoch: 0009 cost= 19.389794492
  65. Epoch: 0010 cost= 19.177005816
  66. Epoch: 0011 cost= 19.082493615
  67. Epoch: 0012 cost= 19.072873598
  68. Epoch: 0013 cost= 18.938005402
  69. Epoch: 0014 cost= 18.891806430
  70. Epoch: 0015 cost= 18.839480221
  71. Epoch: 0016 cost= 18.769349510
  72. Epoch: 0017 cost= 18.590865587
  73. Epoch: 0018 cost= 18.623413677
  74. Epoch: 0019 cost= 18.546149085
  75. Epoch: 0020 cost= 18.432274895
  76. Epoch: 0021 cost= 18.358189004
  77. Epoch: 0022 cost= 18.380014628
  78. Epoch: 0023 cost= 18.499993471
  79. Epoch: 0024 cost= 18.386477311
  80. Epoch: 0025 cost= 18.258080609
  81. Optimization Finished!
  82. Accuracy: 0.9048
  83. '''
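The model above is a single softmax layer trained with a summed cross-entropy cost. Here is a NumPy sketch of what those two operations compute for one batch (illustrative only; random data stands in for MNIST, and the shapes follow the placeholders above):

    import numpy as np

    def softmax(z):
        # Subtract the row-wise maximum for numerical stability
        e = np.exp(z - z.max(axis=1, keepdims=True))
        return e / e.sum(axis=1, keepdims=True)

    batch_size, n_pixels, n_classes = 100, 784, 10
    x = np.random.rand(batch_size, n_pixels).astype("float32")
    y = np.eye(n_classes)[np.random.randint(0, n_classes, batch_size)]  # one-hot labels

    W = np.zeros((n_pixels, n_classes), dtype="float32")
    b = np.zeros(n_classes, dtype="float32")

    activation = softmax(x.dot(W) + b)       # model output, shape (100, 10)
    cost = -np.sum(y * np.log(activation))   # summed cross-entropy, as in the TF code
    grad_W = x.T.dot(activation - y)         # the gradient that gradient descent follows
    print(cost, grad_W.shape)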

1.4 TensorFlow Nearest Neighbor

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. import numpy as np
  2. import tensorflow as tf
  3. # Import MNIST data
  4. import input_data
  5. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  6. '''
  7. Extracting /tmp/data/train-images-idx3-ubyte.gz
  8. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  9. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  10. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  11. '''
  12. # 这个例子中,我们限制 mnist 数据
  13. Xtr, Ytr = mnist.train.next_batch(5000) # 训练集 5000 个(nn 候选)
  14. Xte, Yte = mnist.test.next_batch(200) # 测试集 200 个
  15. # 将图像的形状变为一维
  16. Xtr = np.reshape(Xtr, newshape=(-1, 28*28))
  17. Xte = np.reshape(Xte, newshape=(-1, 28*28))
  18. # TF 图输入
  19. xtr = tf.placeholder("float", [None, 784])
  20. xte = tf.placeholder("float", [784])
  21. # 使用 L1 距离计算最近邻
  22. distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
  23. # 预测:获取最小距离的下标(最近邻)
  24. pred = tf.arg_min(distance, 0)
  25. accuracy = 0.
  26. # 初始化变量
  27. init = tf.initialize_all_variables()
  28. # 加载图
  29. with tf.Session() as sess:
  30. sess.run(init)
  31. # 遍历测试数据
  32. for i in range(len(Xte)):
  33. # 获取最近邻
  34. nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})
  35. # 获取最近邻的类标签,并将其与真实标签比较
  36. print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
  37. "True Class:", np.argmax(Yte[i])
  38. # 计算准确率
  39. if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
  40. accuracy += 1./len(Xte)
  41. print "Done!"
  42. print "Accuracy:", accuracy
  43. '''
  44. Test 0 Prediction: 7 True Class: 7
  45. Test 1 Prediction: 2 True Class: 2
  46. Test 2 Prediction: 1 True Class: 1
  47. Test 3 Prediction: 0 True Class: 0
  48. Test 4 Prediction: 4 True Class: 4
  49. Test 5 Prediction: 1 True Class: 1
  50. Test 6 Prediction: 4 True Class: 4
  51. Test 7 Prediction: 9 True Class: 9
  52. Test 8 Prediction: 8 True Class: 5
  53. Test 9 Prediction: 9 True Class: 9
  54. Test 10 Prediction: 0 True Class: 0
  55. Test 11 Prediction: 0 True Class: 6
  56. Test 12 Prediction: 9 True Class: 9
  57. Test 13 Prediction: 0 True Class: 0
  58. Test 14 Prediction: 1 True Class: 1
  59. Test 15 Prediction: 5 True Class: 5
  60. Test 16 Prediction: 4 True Class: 9
  61. Test 17 Prediction: 7 True Class: 7
  62. Test 18 Prediction: 3 True Class: 3
  63. Test 19 Prediction: 4 True Class: 4
  64. Test 20 Prediction: 9 True Class: 9
  65. Test 21 Prediction: 6 True Class: 6
  66. Test 22 Prediction: 6 True Class: 6
  67. Test 23 Prediction: 5 True Class: 5
  68. Test 24 Prediction: 4 True Class: 4
  69. Test 25 Prediction: 0 True Class: 0
  70. Test 26 Prediction: 7 True Class: 7
  71. Test 27 Prediction: 4 True Class: 4
  72. Test 28 Prediction: 0 True Class: 0
  73. Test 29 Prediction: 1 True Class: 1
  74. Test 30 Prediction: 3 True Class: 3
  75. Test 31 Prediction: 1 True Class: 1
  76. Test 32 Prediction: 3 True Class: 3
  77. Test 33 Prediction: 4 True Class: 4
  78. Test 34 Prediction: 7 True Class: 7
  79. Test 35 Prediction: 2 True Class: 2
  80. Test 36 Prediction: 7 True Class: 7
  81. Test 37 Prediction: 1 True Class: 1
  82. Test 38 Prediction: 2 True Class: 2
  83. Test 39 Prediction: 1 True Class: 1
  84. Test 40 Prediction: 1 True Class: 1
  85. Test 41 Prediction: 7 True Class: 7
  86. Test 42 Prediction: 4 True Class: 4
  87. Test 43 Prediction: 1 True Class: 2
  88. Test 44 Prediction: 3 True Class: 3
  89. Test 45 Prediction: 5 True Class: 5
  90. Test 46 Prediction: 1 True Class: 1
  91. Test 47 Prediction: 2 True Class: 2
  92. Test 48 Prediction: 4 True Class: 4
  93. Test 49 Prediction: 4 True Class: 4
  94. Test 50 Prediction: 6 True Class: 6
  95. Test 51 Prediction: 3 True Class: 3
  96. Test 52 Prediction: 5 True Class: 5
  97. Test 53 Prediction: 5 True Class: 5
  98. Test 54 Prediction: 6 True Class: 6
  99. Test 55 Prediction: 0 True Class: 0
  100. Test 56 Prediction: 4 True Class: 4
  101. Test 57 Prediction: 1 True Class: 1
  102. Test 58 Prediction: 9 True Class: 9
  103. Test 59 Prediction: 5 True Class: 5
  104. Test 60 Prediction: 7 True Class: 7
  105. Test 61 Prediction: 8 True Class: 8
  106. Test 62 Prediction: 9 True Class: 9
  107. Test 63 Prediction: 3 True Class: 3
  108. Test 64 Prediction: 7 True Class: 7
  109. Test 65 Prediction: 4 True Class: 4
  110. Test 66 Prediction: 6 True Class: 6
  111. Test 67 Prediction: 4 True Class: 4
  112. Test 68 Prediction: 3 True Class: 3
  113. Test 69 Prediction: 0 True Class: 0
  114. Test 70 Prediction: 7 True Class: 7
  115. Test 71 Prediction: 0 True Class: 0
  116. Test 72 Prediction: 2 True Class: 2
  117. Test 73 Prediction: 7 True Class: 9
  118. Test 74 Prediction: 1 True Class: 1
  119. Test 75 Prediction: 7 True Class: 7
  120. Test 76 Prediction: 3 True Class: 3
  121. Test 77 Prediction: 7 True Class: 2
  122. Test 78 Prediction: 9 True Class: 9
  123. Test 79 Prediction: 7 True Class: 7
  124. Test 80 Prediction: 7 True Class: 7
  125. Test 81 Prediction: 6 True Class: 6
  126. Test 82 Prediction: 2 True Class: 2
  127. Test 83 Prediction: 7 True Class: 7
  128. Test 84 Prediction: 8 True Class: 8
  129. Test 85 Prediction: 4 True Class: 4
  130. Test 86 Prediction: 7 True Class: 7
  131. Test 87 Prediction: 3 True Class: 3
  132. Test 88 Prediction: 6 True Class: 6
  133. Test 89 Prediction: 1 True Class: 1
  134. Test 90 Prediction: 3 True Class: 3
  135. Test 91 Prediction: 6 True Class: 6
  136. Test 92 Prediction: 9 True Class: 9
  137. Test 93 Prediction: 3 True Class: 3
  138. Test 94 Prediction: 1 True Class: 1
  139. Test 95 Prediction: 4 True Class: 4
  140. Test 96 Prediction: 1 True Class: 1
  141. Test 97 Prediction: 7 True Class: 7
  142. Test 98 Prediction: 6 True Class: 6
  143. Test 99 Prediction: 9 True Class: 9
  144. Test 100 Prediction: 6 True Class: 6
  145. Test 101 Prediction: 0 True Class: 0
  146. Test 102 Prediction: 5 True Class: 5
  147. Test 103 Prediction: 4 True Class: 4
  148. Test 104 Prediction: 9 True Class: 9
  149. Test 105 Prediction: 9 True Class: 9
  150. Test 106 Prediction: 2 True Class: 2
  151. Test 107 Prediction: 1 True Class: 1
  152. Test 108 Prediction: 9 True Class: 9
  153. Test 109 Prediction: 4 True Class: 4
  154. Test 110 Prediction: 8 True Class: 8
  155. Test 111 Prediction: 7 True Class: 7
  156. Test 112 Prediction: 3 True Class: 3
  157. Test 113 Prediction: 9 True Class: 9
  158. Test 114 Prediction: 7 True Class: 7
  159. Test 115 Prediction: 9 True Class: 4
  160. Test 116 Prediction: 9 True Class: 4
  161. Test 117 Prediction: 4 True Class: 4
  162. Test 118 Prediction: 9 True Class: 9
  163. Test 119 Prediction: 7 True Class: 2
  164. Test 120 Prediction: 5 True Class: 5
  165. Test 121 Prediction: 4 True Class: 4
  166. Test 122 Prediction: 7 True Class: 7
  167. Test 123 Prediction: 6 True Class: 6
  168. Test 124 Prediction: 7 True Class: 7
  169. Test 125 Prediction: 9 True Class: 9
  170. Test 126 Prediction: 0 True Class: 0
  171. Test 127 Prediction: 5 True Class: 5
  172. Test 128 Prediction: 8 True Class: 8
  173. Test 129 Prediction: 5 True Class: 5
  174. Test 130 Prediction: 6 True Class: 6
  175. Test 131 Prediction: 6 True Class: 6
  176. Test 132 Prediction: 5 True Class: 5
  177. Test 133 Prediction: 7 True Class: 7
  178. Test 134 Prediction: 8 True Class: 8
  179. Test 135 Prediction: 1 True Class: 1
  180. Test 136 Prediction: 0 True Class: 0
  181. Test 137 Prediction: 1 True Class: 1
  182. Test 138 Prediction: 6 True Class: 6
  183. Test 139 Prediction: 4 True Class: 4
  184. Test 140 Prediction: 6 True Class: 6
  185. Test 141 Prediction: 7 True Class: 7
  186. Test 142 Prediction: 2 True Class: 3
  187. Test 143 Prediction: 1 True Class: 1
  188. Test 144 Prediction: 7 True Class: 7
  189. Test 145 Prediction: 1 True Class: 1
  190. Test 146 Prediction: 8 True Class: 8
  191. Test 147 Prediction: 2 True Class: 2
  192. Test 148 Prediction: 0 True Class: 0
  193. Test 149 Prediction: 1 True Class: 2
  194. Test 150 Prediction: 9 True Class: 9
  195. Test 151 Prediction: 9 True Class: 9
  196. Test 152 Prediction: 5 True Class: 5
  197. Test 153 Prediction: 5 True Class: 5
  198. Test 154 Prediction: 1 True Class: 1
  199. Test 155 Prediction: 5 True Class: 5
  200. Test 156 Prediction: 6 True Class: 6
  201. Test 157 Prediction: 0 True Class: 0
  202. Test 158 Prediction: 3 True Class: 3
  203. Test 159 Prediction: 4 True Class: 4
  204. Test 160 Prediction: 4 True Class: 4
  205. Test 161 Prediction: 6 True Class: 6
  206. Test 162 Prediction: 5 True Class: 5
  207. Test 163 Prediction: 4 True Class: 4
  208. Test 164 Prediction: 6 True Class: 6
  209. Test 165 Prediction: 5 True Class: 5
  210. Test 166 Prediction: 4 True Class: 4
  211. Test 167 Prediction: 5 True Class: 5
  212. Test 168 Prediction: 1 True Class: 1
  213. Test 169 Prediction: 4 True Class: 4
  214. Test 170 Prediction: 9 True Class: 4
  215. Test 171 Prediction: 7 True Class: 7
  216. Test 172 Prediction: 2 True Class: 2
  217. Test 173 Prediction: 3 True Class: 3
  218. Test 174 Prediction: 2 True Class: 2
  219. Test 175 Prediction: 1 True Class: 7
  220. Test 176 Prediction: 1 True Class: 1
  221. Test 177 Prediction: 8 True Class: 8
  222. Test 178 Prediction: 1 True Class: 1
  223. Test 179 Prediction: 8 True Class: 8
  224. Test 180 Prediction: 1 True Class: 1
  225. Test 181 Prediction: 8 True Class: 8
  226. Test 182 Prediction: 5 True Class: 5
  227. Test 183 Prediction: 0 True Class: 0
  228. Test 184 Prediction: 2 True Class: 8
  229. Test 185 Prediction: 9 True Class: 9
  230. Test 186 Prediction: 2 True Class: 2
  231. Test 187 Prediction: 5 True Class: 5
  232. Test 188 Prediction: 0 True Class: 0
  233. Test 189 Prediction: 1 True Class: 1
  234. Test 190 Prediction: 1 True Class: 1
  235. Test 191 Prediction: 1 True Class: 1
  236. Test 192 Prediction: 0 True Class: 0
  237. Test 193 Prediction: 4 True Class: 9
  238. Test 194 Prediction: 0 True Class: 0
  239. Test 195 Prediction: 1 True Class: 3
  240. Test 196 Prediction: 1 True Class: 1
  241. Test 197 Prediction: 6 True Class: 6
  242. Test 198 Prediction: 4 True Class: 4
  243. Test 199 Prediction: 2 True Class: 2
  244. Done!
  245. Accuracy: 0.92
  246. '''
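The whole classifier reduces to an argmin over L1 distances, which is easy to reproduce outside of TensorFlow. A NumPy sketch of the same decision rule (illustrative only; random vectors stand in for the MNIST batches above):

    import numpy as np

    def nn_predict(Xtr, Ytr, x):
        # L1 distance from the query vector to every training vector
        distance = np.sum(np.abs(Xtr - x), axis=1)
        nn_index = np.argmin(distance)      # index of the nearest neighbour
        return np.argmax(Ytr[nn_index])     # class label of that neighbour (one-hot)

    Xtr = np.random.rand(5000, 784).astype("float32")
    Ytr = np.eye(10)[np.random.randint(0, 10, 5000)]
    Xte = np.random.rand(200, 784).astype("float32")

    print("Prediction for test sample 0:", nn_predict(Xtr, Ytr, Xte[0]))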

1.5 TensorFlow AlexNet

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. # Import MNIST data
  2. import input_data
  3. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  4. '''
  5. Extracting /tmp/data/train-images-idx3-ubyte.gz
  6. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  7. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  8. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  9. '''
  10. import tensorflow as tf
  11. # 参数
  12. learning_rate = 0.001
  13. training_iters = 300000
  14. batch_size = 64
  15. display_step = 100
  16. # 网络参数
  17. n_input = 784 # MNIST 数据输入(图像大小:28x28)
  18. n_classes = 10 # MNIST 全部类别(0-9 的数字)
  19. dropout = 0.8 # 丢弃,单元被保留的概率
  20. # TF 图输入
  21. x = tf.placeholder(tf.float32, [None, n_input])
  22. y = tf.placeholder(tf.float32, [None, n_classes])
  23. keep_prob = tf.placeholder(tf.float32) # 丢弃(保留的概率)
  24. # 创建 AlexNet 模型
  25. def conv2d(name, l_input, w, b):
  26. return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1],
  27. padding='SAME'),b), name=name)
  28. def max_pool(name, l_input, k):
  29. return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1],
  30. padding='SAME', name=name)
  31. def norm(name, l_input, lsize=4):
  32. return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
  33. def alex_net(_X, _weights, _biases, _dropout):
  34. # 改变输入图片的形状
  35. _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
  36. # 卷积层
  37. conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
  38. # 最大池化(下采样)
  39. pool1 = max_pool('pool1', conv1, k=2)
  40. # 应用标准化
  41. norm1 = norm('norm1', pool1, lsize=4)
  42. # 应用丢弃
  43. norm1 = tf.nn.dropout(norm1, _dropout)
  44. # 卷积层
  45. conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
  46. # 最大池化(下采样)
  47. pool2 = max_pool('pool2', conv2, k=2)
  48. # 应用标准化
  49. norm2 = norm('norm2', pool2, lsize=4)
  50. # 应用丢弃
  51. norm2 = tf.nn.dropout(norm2, _dropout)
  52. # 卷积层
  53. conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
  54. # 最大池化(下采样)
  55. pool3 = max_pool('pool3', conv3, k=2)
  56. # 应用标准化
  57. norm3 = norm('norm3', pool3, lsize=4)
  58. # 应用丢弃
  59. norm3 = tf.nn.dropout(norm3, _dropout)
  60. # 全连接层
  61. # 修改 conv3 输出的形状来匹配密集层的输入
  62. dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])
  63. # Relu 激活
  64. dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')
  65. # Relu 激活
  66. dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')
  67. # 输出,类的预测
  68. out = tf.matmul(dense2, _weights['out']) + _biases['out']
  69. return out
  70. # 储存层的权重和偏置
  71. weights = {
  72. 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
  73. 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
  74. 'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
  75. 'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
  76. 'wd2': tf.Variable(tf.random_normal([1024, 1024])),
  77. 'out': tf.Variable(tf.random_normal([1024, 10]))
  78. }
  79. biases = {
  80. 'bc1': tf.Variable(tf.random_normal([64])),
  81. 'bc2': tf.Variable(tf.random_normal([128])),
  82. 'bc3': tf.Variable(tf.random_normal([256])),
  83. 'bd1': tf.Variable(tf.random_normal([1024])),
  84. 'bd2': tf.Variable(tf.random_normal([1024])),
  85. 'out': tf.Variable(tf.random_normal([n_classes]))
  86. }
  87. # 构造模型
  88. pred = alex_net(x, weights, biases, keep_prob)
  89. # 定义损失和优化器
  90. cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
  91. optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
  92. # 评估模型
  93. correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
  94. accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  95. # 初始化变量
  96. init = tf.global_variables_initializer()
  97. # 加载图
  98. with tf.Session() as sess:
  99. sess.run(init)
  100. step = 1
  101. # 持续训练,直到达到最大迭代
  102. while step * batch_size < training_iters:
  103. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  104. # 使用批量数据拟合训练
  105. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
  106. if step % display_step == 0:
  107. # 计算批量准确率
  108. acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
  109. # 计算批量损失
  110. loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
  111. print "Iter " + str(step*batch_size) + ", Minibatch Loss= " \
  112. + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
  113. step += 1
  114. print "Optimization Finished!"
  115. # 为 256 个 mnist 测试图像计算准确率
  116. print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
  117. y: mnist.test.labels[:256],
  118. keep_prob: 1.})
  119. '''
  120. Iter 6400, Minibatch Loss= 29666.185547, Training Accuracy= 0.59375
  121. Iter 12800, Minibatch Loss= 22125.562500, Training Accuracy= 0.60938
  122. Iter 19200, Minibatch Loss= 22631.134766, Training Accuracy= 0.59375
  123. Iter 25600, Minibatch Loss= 18498.414062, Training Accuracy= 0.62500
  124. Iter 32000, Minibatch Loss= 11318.283203, Training Accuracy= 0.70312
  125. Iter 38400, Minibatch Loss= 12076.280273, Training Accuracy= 0.70312
  126. Iter 44800, Minibatch Loss= 8195.520508, Training Accuracy= 0.82812
  127. Iter 51200, Minibatch Loss= 5176.181641, Training Accuracy= 0.84375
  128. Iter 57600, Minibatch Loss= 8951.896484, Training Accuracy= 0.81250
  129. Iter 64000, Minibatch Loss= 10096.946289, Training Accuracy= 0.78125
  130. Iter 70400, Minibatch Loss= 11466.641602, Training Accuracy= 0.68750
  131. Iter 76800, Minibatch Loss= 7469.824219, Training Accuracy= 0.78125
  132. Iter 83200, Minibatch Loss= 4147.449219, Training Accuracy= 0.89062
  133. Iter 89600, Minibatch Loss= 5904.782227, Training Accuracy= 0.82812
  134. Iter 96000, Minibatch Loss= 718.493713, Training Accuracy= 0.93750
  135. Iter 102400, Minibatch Loss= 2184.151367, Training Accuracy= 0.93750
  136. Iter 108800, Minibatch Loss= 2354.463135, Training Accuracy= 0.89062
  137. Iter 115200, Minibatch Loss= 8612.959961, Training Accuracy= 0.81250
  138. Iter 121600, Minibatch Loss= 2225.773926, Training Accuracy= 0.84375
  139. Iter 128000, Minibatch Loss= 160.583618, Training Accuracy= 0.96875
  140. Iter 134400, Minibatch Loss= 1524.846069, Training Accuracy= 0.93750
  141. Iter 140800, Minibatch Loss= 3501.871094, Training Accuracy= 0.89062
  142. Iter 147200, Minibatch Loss= 661.977051, Training Accuracy= 0.96875
  143. Iter 153600, Minibatch Loss= 367.857788, Training Accuracy= 0.98438
  144. Iter 160000, Minibatch Loss= 1735.458740, Training Accuracy= 0.90625
  145. Iter 166400, Minibatch Loss= 209.320374, Training Accuracy= 0.95312
  146. Iter 172800, Minibatch Loss= 1788.553955, Training Accuracy= 0.90625
  147. Iter 179200, Minibatch Loss= 912.995544, Training Accuracy= 0.93750
  148. Iter 185600, Minibatch Loss= 2534.074463, Training Accuracy= 0.87500
  149. Iter 192000, Minibatch Loss= 73.052612, Training Accuracy= 0.96875
  150. Iter 198400, Minibatch Loss= 1609.606323, Training Accuracy= 0.93750
  151. Iter 204800, Minibatch Loss= 1823.219727, Training Accuracy= 0.96875
  152. Iter 211200, Minibatch Loss= 578.051086, Training Accuracy= 0.96875
  153. Iter 217600, Minibatch Loss= 1532.326172, Training Accuracy= 0.89062
  154. Iter 224000, Minibatch Loss= 769.775269, Training Accuracy= 0.95312
  155. Iter 230400, Minibatch Loss= 2614.737793, Training Accuracy= 0.92188
  156. Iter 236800, Minibatch Loss= 938.664368, Training Accuracy= 0.95312
  157. Iter 243200, Minibatch Loss= 1520.495605, Training Accuracy= 0.93750
  158. Iter 249600, Minibatch Loss= 657.419739, Training Accuracy= 0.95312
  159. Iter 256000, Minibatch Loss= 522.802124, Training Accuracy= 0.90625
  160. Iter 262400, Minibatch Loss= 211.188477, Training Accuracy= 0.96875
  161. Iter 268800, Minibatch Loss= 520.451172, Training Accuracy= 0.92188
  162. Iter 275200, Minibatch Loss= 1418.759155, Training Accuracy= 0.89062
  163. Iter 281600, Minibatch Loss= 241.748596, Training Accuracy= 0.96875
  164. Iter 288000, Minibatch Loss= 0.000000, Training Accuracy= 1.00000
  165. Iter 294400, Minibatch Loss= 1535.772827, Training Accuracy= 0.92188
  166. Optimization Finished!
  167. Testing Accuracy: 0.980469
  168. '''
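A note on the 4*4*256 size of 'wd1': each max_pool call uses k=2 with SAME padding, so every pooling layer halves the spatial size and rounds up. Starting from the 28x28 input, three pools give 14, then 7, then 4, and conv3 produces 256 channels. A quick sketch of that arithmetic (plain Python, illustrative only):

    import math

    size = 28
    for layer in ["pool1", "pool2", "pool3"]:
        # 2x2 max pooling with stride 2 and SAME padding: output = ceil(input / 2)
        size = int(math.ceil(size / 2.0))
        print(layer, "->", size, "x", size)

    channels = 256  # output channels of conv3 ('wc3' above)
    print("flattened dense input:", size * size * channels)  # 4*4*256 = 4096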

1.6 TensorFlow Convolutional Neural Network

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. # Import MNIST data
  2. import input_data
  3. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  4. '''
  5. Extracting /tmp/data/train-images-idx3-ubyte.gz
  6. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  7. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  8. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  9. '''
  10. import tensorflow as tf
  11. # 参数
  12. learning_rate = 0.001
  13. training_iters = 100000
  14. batch_size = 128
  15. display_step = 20
  16. # 网络参数
  17. n_input = 784 # MNIST 数据输入(图像大小:28x28)
  18. n_classes = 10 # MNIST 全部类别(0-9 的数字)
  19. dropout = 0.75 # 丢弃,单元被保留的概率
  20. # TF 图输入
  21. x = tf.placeholder(tf.float32, [None, n_input])
  22. y = tf.placeholder(tf.float32, [None, n_classes])
  23. keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
  24. # 创建模型
  25. def conv2d(img, w, b):
  26. return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1],
  27. padding='SAME'),b))
  28. def max_pool(img, k):
  29. return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
  30. def conv_net(_X, _weights, _biases, _dropout):
  31. # 改变输入图片的形状
  32. _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
  33. # 卷积层
  34. conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
  35. # 最大池化(下采样)
  36. conv1 = max_pool(conv1, k=2)
  37. # 应用丢弃
  38. conv1 = tf.nn.dropout(conv1, _dropout)
  39. # 卷积层
  40. conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
  41. # 最大池化(下采样)
  42. conv2 = max_pool(conv2, k=2)
  43. # 应用丢弃
  44. conv2 = tf.nn.dropout(conv2, _dropout)
  45. # 全连接层
  46. # Reshape conv2 output to fit dense layer input
  47. dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]])
  48. # Relu 激活
  49. dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1']))
  50. # 应用丢弃
  51. dense1 = tf.nn.dropout(dense1, _dropout) # 应用丢弃
  52. # 输出,类的预测
  53. out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
  54. return out
  55. # 储存层的权重和偏置
  56. weights = {
  57. # 5x5 卷积, 1 输入, 32 输出
  58. 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
  59. # 5x5 卷积, 32 输入, 64 输出
  60. 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
  61. # 全连接, 7*7*64 输入, 1024 输出
  62. 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
  63. # 1024 输入, 10 输出(类别预测)
  64. 'out': tf.Variable(tf.random_normal([1024, n_classes]))
  65. }
  66. biases = {
  67. 'bc1': tf.Variable(tf.random_normal([32])),
  68. 'bc2': tf.Variable(tf.random_normal([64])),
  69. 'bd1': tf.Variable(tf.random_normal([1024])),
  70. 'out': tf.Variable(tf.random_normal([n_classes]))
  71. }
  72. # 构造模型
  73. pred = conv_net(x, weights, biases, keep_prob)
  74. # 定义损失和优化器
  75. cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
  76. optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
  77. # 评估模型
  78. correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
  79. accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  80. # 初始化变量
  81. init = tf.global_variables_initializer()
  82. # 加载图
  83. with tf.Session() as sess:
  84. sess.run(init)
  85. step = 1
  86. # 持续训练,直到达到最大迭代
  87. while step * batch_size < training_iters:
  88. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  89. # 使用批量数据拟合训练
  90. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
  91. if step % display_step == 0:
  92. # 计算批量准确率
  93. acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
  94. # 计算批量损失
  95. loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
  96. print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
  97. "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
  98. step += 1
  99. print "Optimization Finished!"
  100. # Calculate accuracy for 256 MNIST test images
  101. print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
  102. y: mnist.test.labels[:256],
  103. keep_prob: 1.})
  104. '''
  105. Iter 2560, Minibatch Loss= 26046.011719, Training Accuracy= 0.21094
  106. Iter 5120, Minibatch Loss= 10456.769531, Training Accuracy= 0.52344
  107. Iter 7680, Minibatch Loss= 6273.207520, Training Accuracy= 0.71875
  108. Iter 10240, Minibatch Loss= 6276.231445, Training Accuracy= 0.64062
  109. Iter 12800, Minibatch Loss= 4188.221680, Training Accuracy= 0.77344
  110. Iter 15360, Minibatch Loss= 2717.077637, Training Accuracy= 0.80469
  111. Iter 17920, Minibatch Loss= 4057.120361, Training Accuracy= 0.81250
  112. Iter 20480, Minibatch Loss= 1696.550415, Training Accuracy= 0.87500
  113. Iter 23040, Minibatch Loss= 2525.317627, Training Accuracy= 0.85938
  114. Iter 25600, Minibatch Loss= 2341.906738, Training Accuracy= 0.87500
  115. Iter 28160, Minibatch Loss= 4200.535156, Training Accuracy= 0.79688
  116. Iter 30720, Minibatch Loss= 1888.964355, Training Accuracy= 0.89062
  117. Iter 33280, Minibatch Loss= 2167.645996, Training Accuracy= 0.84375
  118. Iter 35840, Minibatch Loss= 1932.107544, Training Accuracy= 0.89844
  119. Iter 38400, Minibatch Loss= 1562.430054, Training Accuracy= 0.90625
  120. Iter 40960, Minibatch Loss= 1676.755249, Training Accuracy= 0.84375
  121. Iter 43520, Minibatch Loss= 1003.626099, Training Accuracy= 0.93750
  122. Iter 46080, Minibatch Loss= 1176.615479, Training Accuracy= 0.86719
  123. Iter 48640, Minibatch Loss= 1260.592651, Training Accuracy= 0.88281
  124. Iter 51200, Minibatch Loss= 1399.667969, Training Accuracy= 0.86719
  125. Iter 53760, Minibatch Loss= 1259.961426, Training Accuracy= 0.89844
  126. Iter 56320, Minibatch Loss= 1415.800781, Training Accuracy= 0.89062
  127. Iter 58880, Minibatch Loss= 1835.365967, Training Accuracy= 0.85156
  128. Iter 61440, Minibatch Loss= 1395.168823, Training Accuracy= 0.90625
  129. Iter 64000, Minibatch Loss= 973.283569, Training Accuracy= 0.88281
  130. Iter 66560, Minibatch Loss= 818.093811, Training Accuracy= 0.92969
  131. Iter 69120, Minibatch Loss= 1178.744263, Training Accuracy= 0.92188
  132. Iter 71680, Minibatch Loss= 845.889709, Training Accuracy= 0.89844
  133. Iter 74240, Minibatch Loss= 1259.505615, Training Accuracy= 0.90625
  134. Iter 76800, Minibatch Loss= 738.037109, Training Accuracy= 0.89844
  135. Iter 79360, Minibatch Loss= 862.499146, Training Accuracy= 0.93750
  136. Iter 81920, Minibatch Loss= 739.704041, Training Accuracy= 0.90625
  137. Iter 84480, Minibatch Loss= 652.880310, Training Accuracy= 0.95312
  138. Iter 87040, Minibatch Loss= 635.464600, Training Accuracy= 0.92969
  139. Iter 89600, Minibatch Loss= 933.166626, Training Accuracy= 0.90625
  140. Iter 92160, Minibatch Loss= 213.874893, Training Accuracy= 0.96094
  141. Iter 94720, Minibatch Loss= 609.575684, Training Accuracy= 0.91406
  142. Iter 97280, Minibatch Loss= 560.208008, Training Accuracy= 0.93750
  143. Iter 99840, Minibatch Loss= 963.577148, Training Accuracy= 0.90625
  144. Optimization Finished!
  145. Testing Accuracy: 0.960938
  146. '''
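Here 'wd1' expects 7*7*64 inputs because the two pooling layers shrink the 28x28 image to 14x14 and then 7x7, while conv2 produces 64 channels. The reshape trick below is a NumPy sketch of 2x2 max pooling on a single channel (illustrative only; tf.nn.max_pool applies the same idea per channel and per image in the batch):

    import numpy as np

    def max_pool_2x2(img):
        # Group an even-sized (H, W) image into 2x2 blocks and keep the maximum of each block
        h, w = img.shape
        return img.reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))

    img = np.random.rand(28, 28)
    pool1 = max_pool_2x2(img)     # (14, 14)
    pool2 = max_pool_2x2(pool1)   # (7, 7)
    print(pool1.shape, pool2.shape, "dense input:", pool2.size * 64)  # 7*7*64 = 3136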

1.7 TensorFlow Multilayer Perceptron

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. # Import MNIST data
  2. import input_data
  3. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  4. '''
  5. Extracting /tmp/data/train-images-idx3-ubyte.gz
  6. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  7. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  8. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  9. '''
  10. import tensorflow as tf
  11. # 参数
  12. learning_rate = 0.001
  13. training_epochs = 15
  14. batch_size = 100
  15. display_step = 1
  16. # 网络参数
  17. n_hidden_1 = 256 # 第一层的特征数量
  18. n_hidden_2 = 256 # 第二层的特征数量
  19. n_input = 784 # MNIST 数据输入(图像形状:28x28)
  20. n_classes = 10 # MNIST total classes (0-9 digits)
  21. # TF 图输入
  22. x = tf.placeholder("float", [None, n_input])
  23. y = tf.placeholder("float", [None, n_classes])
  24. # 创建模型
  25. def multilayer_perceptron(_X, _weights, _biases):
  26. # 带有 RELU 激活的隐层
  27. layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
  28. # 带有 RELU 激活的隐层
  29. layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))
  30. return tf.matmul(layer_2, weights['out']) + biases['out']
  31. # 储存层的权重和偏置
  32. weights = {
  33. 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
  34. 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
  35. 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
  36. }
  37. biases = {
  38. 'b1': tf.Variable(tf.random_normal([n_hidden_1])),
  39. 'b2': tf.Variable(tf.random_normal([n_hidden_2])),
  40. 'out': tf.Variable(tf.random_normal([n_classes]))
  41. }
  42. # 构造模型
  43. pred = multilayer_perceptron(x, weights, biases)
  44. # 定义损失和优化器
  45. # Softmax 损失
  46. cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
  47. # Adam 优化器
  48. optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
  49. # 初始化变量
  50. init = tf.global_variables_initializer()
  51. # 加载图
  52. with tf.Session() as sess:
  53. sess.run(init)
  54. # 训练循环
  55. for epoch in range(training_epochs):
  56. avg_cost = 0.
  57. total_batch = int(mnist.train.num_examples/batch_size)
  58. # 遍历所有批量
  59. for i in range(total_batch):
  60. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  61. # 使用批量数据拟合训练
  62. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
  63. # 计算平均损失
  64. avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
  65. # 展示每一步的日志
  66. if epoch % display_step == 0:
  67. print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
  68. print "Optimization Finished!"
  69. # 测试模型
  70. correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
  71. # 计算准确率
  72. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  73. print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
  74. '''
  75. Epoch: 0001 cost= 160.113980416
  76. Epoch: 0002 cost= 38.665780694
  77. Epoch: 0003 cost= 24.118004577
  78. Epoch: 0004 cost= 16.440921303
  79. Epoch: 0005 cost= 11.689460141
  80. Epoch: 0006 cost= 8.469423468
  81. Epoch: 0007 cost= 6.223237230
  82. Epoch: 0008 cost= 4.560174118
  83. Epoch: 0009 cost= 3.250516910
  84. Epoch: 0010 cost= 2.359658795
  85. Epoch: 0011 cost= 1.694081847
  86. Epoch: 0012 cost= 1.167997509
  87. Epoch: 0013 cost= 0.872986831
  88. Epoch: 0014 cost= 0.630616366
  89. Epoch: 0015 cost= 0.487381571
  90. Optimization Finished!
  91. Accuracy: 0.9462
  92. '''
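multilayer_perceptron is just two ReLU layers followed by a linear output layer. A NumPy sketch of the same forward pass with random weights (shapes follow the network parameters above; illustrative only, no training):

    import numpy as np

    n_input, n_hidden_1, n_hidden_2, n_classes = 784, 256, 256, 10

    def relu(z):
        return np.maximum(z, 0)

    # Random weights and biases, analogous to tf.random_normal above
    h1 = np.random.randn(n_input, n_hidden_1)
    h2 = np.random.randn(n_hidden_1, n_hidden_2)
    w_out = np.random.randn(n_hidden_2, n_classes)
    b1, b2, b_out = np.random.randn(n_hidden_1), np.random.randn(n_hidden_2), np.random.randn(n_classes)

    x = np.random.rand(100, n_input)        # a fake batch of 100 flattened images
    layer_1 = relu(x.dot(h1) + b1)          # hidden layer 1 with ReLU activation
    layer_2 = relu(layer_1.dot(h2) + b2)    # hidden layer 2 with ReLU activation
    logits = layer_2.dot(w_out) + b_out     # unnormalized class scores
    print(logits.shape)                     # (100, 10)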

1.8 TensorFlow Recurrent Neural Network

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. # Import MNIST data
  2. import input_data
  3. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  4. import tensorflow as tf
  5. from tensorflow.models.rnn import rnn, rnn_cell
  6. import numpy as np
  7. '''
  8. Extracting /tmp/data/train-images-idx3-ubyte.gz
  9. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  10. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  11. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  12. '''
  13. '''
  14. To classify images using a recurrent neural network, we consider every image row as a sequence of pixels.
  15. Because the MNIST image shape is 28*28 px, we then handle 28 sequences of 28 steps for every sample.
  16. '''
  17. # 参数
  18. learning_rate = 0.001
  19. training_iters = 100000
  20. batch_size = 128
  21. display_step = 10
  22. # 网络参数
  23. n_input = 28 # MNIST 数据输入(图像大小:28x28)
  24. n_steps = 28 # 时间步骤
  25. n_hidden = 128 # 隐层的特征数量
  26. n_classes = 10 # MNIST 全部类别(0-9 的数字)
  27. # TF 图输入
  28. x = tf.placeholder("float", [None, n_steps, n_input])
  29. istate = tf.placeholder("float", [None, 2*n_hidden]) # 状态和单元 => 2x n_hidden
  30. y = tf.placeholder("float", [None, n_classes])
  31. # 定义权重
  32. weights = {
  33. 'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # 隐层权重
  34. 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
  35. }
  36. biases = {
  37. 'hidden': tf.Variable(tf.random_normal([n_hidden])),
  38. 'out': tf.Variable(tf.random_normal([n_classes]))
  39. }
  40. def RNN(_X, _istate, _weights, _biases):
  41. # 输入形状:(batch_size, n_steps, n_input)
  42. _X = tf.transpose(_X, [1, 0, 2]) # 转置 n_steps 和 batch_size
  43. # 改变形状来准备隐层激活的输入
  44. _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
  45. # 线性激活
  46. _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']
  47. # 定义 lstm cell
  48. lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
  49. # 分割数据,因为 RNN 单元需要输入的列表,用于 RNN 内部循环
  50. _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)
  51. # Get lstm cell output
  52. outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)
  53. # 线性激活
  54. # Get the last output of the inner loop
  55. return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
  56. pred = RNN(x, istate, weights, biases)
  57. # 定义损失和优化器
  58. cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax 损失
  59. optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam 优化器
  60. # 评估模型
  61. correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
  62. accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  63. # 初始化变量
  64. init = tf.global_variables_initializer()
  65. # 加载图
  66. with tf.Session() as sess:
  67. sess.run(init)
  68. step = 1
  69. # 持续训练,直到达到最大迭代
  70. while step * batch_size < training_iters:
  71. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  72. # 重塑数据来获取 28 个元素的 28 序列
  73. batch_xs = batch_xs.reshape((batch_size, n_steps, n_input))
  74. # 使用批量数据拟合训练
  75. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,
  76. istate: np.zeros((batch_size, 2*n_hidden))})
  77. if step % display_step == 0:
  78. # 计算批量准确率
  79. acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,
  80. istate: np.zeros((batch_size, 2*n_hidden))})
  81. # 计算批量损失
  82. loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,
  83. istate: np.zeros((batch_size, 2*n_hidden))})
  84. print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + \
  85. ", Training Accuracy= " + "{:.5f}".format(acc)
  86. step += 1
  87. print "Optimization Finished!"
  88. # 为 256 个 mnist 测试图像计算准确率
  89. test_len = 256
  90. test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
  91. test_label = mnist.test.labels[:test_len]
  92. print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,
  93. istate: np.zeros((test_len, 2*n_hidden))})
  94. '''
  95. Iter 1280, Minibatch Loss= 1.888242, Training Accuracy= 0.39844
  96. Iter 2560, Minibatch Loss= 1.519879, Training Accuracy= 0.47656
  97. Iter 3840, Minibatch Loss= 1.238005, Training Accuracy= 0.63281
  98. Iter 5120, Minibatch Loss= 0.933760, Training Accuracy= 0.71875
  99. Iter 6400, Minibatch Loss= 0.832130, Training Accuracy= 0.73438
  100. Iter 7680, Minibatch Loss= 0.979760, Training Accuracy= 0.70312
  101. Iter 8960, Minibatch Loss= 0.821921, Training Accuracy= 0.71875
  102. Iter 10240, Minibatch Loss= 0.710566, Training Accuracy= 0.79688
  103. Iter 11520, Minibatch Loss= 0.578501, Training Accuracy= 0.82812
  104. Iter 12800, Minibatch Loss= 0.765049, Training Accuracy= 0.75000
  105. Iter 14080, Minibatch Loss= 0.582995, Training Accuracy= 0.78125
  106. Iter 15360, Minibatch Loss= 0.575092, Training Accuracy= 0.79688
  107. Iter 16640, Minibatch Loss= 0.701214, Training Accuracy= 0.75781
  108. Iter 17920, Minibatch Loss= 0.561972, Training Accuracy= 0.78125
  109. Iter 19200, Minibatch Loss= 0.394480, Training Accuracy= 0.85938
  110. Iter 20480, Minibatch Loss= 0.356244, Training Accuracy= 0.91406
  111. Iter 21760, Minibatch Loss= 0.632163, Training Accuracy= 0.78125
  112. Iter 23040, Minibatch Loss= 0.269334, Training Accuracy= 0.90625
  113. Iter 24320, Minibatch Loss= 0.485007, Training Accuracy= 0.86719
  114. Iter 25600, Minibatch Loss= 0.569704, Training Accuracy= 0.78906
  115. Iter 26880, Minibatch Loss= 0.267697, Training Accuracy= 0.92188
  116. Iter 28160, Minibatch Loss= 0.381177, Training Accuracy= 0.90625
  117. Iter 29440, Minibatch Loss= 0.350800, Training Accuracy= 0.87500
  118. Iter 30720, Minibatch Loss= 0.356782, Training Accuracy= 0.90625
  119. Iter 32000, Minibatch Loss= 0.322511, Training Accuracy= 0.89062
  120. Iter 33280, Minibatch Loss= 0.309195, Training Accuracy= 0.90625
  121. Iter 34560, Minibatch Loss= 0.535408, Training Accuracy= 0.83594
  122. Iter 35840, Minibatch Loss= 0.281643, Training Accuracy= 0.92969
  123. Iter 37120, Minibatch Loss= 0.290962, Training Accuracy= 0.89844
  124. Iter 38400, Minibatch Loss= 0.204718, Training Accuracy= 0.93750
  125. Iter 39680, Minibatch Loss= 0.205882, Training Accuracy= 0.92969
  126. Iter 40960, Minibatch Loss= 0.481441, Training Accuracy= 0.84375
  127. Iter 42240, Minibatch Loss= 0.348245, Training Accuracy= 0.89844
  128. Iter 43520, Minibatch Loss= 0.274692, Training Accuracy= 0.90625
  129. Iter 44800, Minibatch Loss= 0.171815, Training Accuracy= 0.94531
  130. Iter 46080, Minibatch Loss= 0.171035, Training Accuracy= 0.93750
  131. Iter 47360, Minibatch Loss= 0.235800, Training Accuracy= 0.89844
  132. Iter 48640, Minibatch Loss= 0.235974, Training Accuracy= 0.93750
  133. Iter 49920, Minibatch Loss= 0.207323, Training Accuracy= 0.92188
  134. Iter 51200, Minibatch Loss= 0.212989, Training Accuracy= 0.91406
  135. Iter 52480, Minibatch Loss= 0.151774, Training Accuracy= 0.95312
  136. Iter 53760, Minibatch Loss= 0.090070, Training Accuracy= 0.96875
  137. Iter 55040, Minibatch Loss= 0.264714, Training Accuracy= 0.92969
  138. Iter 56320, Minibatch Loss= 0.235086, Training Accuracy= 0.92969
  139. Iter 57600, Minibatch Loss= 0.160302, Training Accuracy= 0.95312
  140. Iter 58880, Minibatch Loss= 0.106515, Training Accuracy= 0.96875
  141. Iter 60160, Minibatch Loss= 0.236039, Training Accuracy= 0.94531
  142. Iter 61440, Minibatch Loss= 0.279540, Training Accuracy= 0.90625
  143. Iter 62720, Minibatch Loss= 0.173585, Training Accuracy= 0.93750
  144. Iter 64000, Minibatch Loss= 0.191009, Training Accuracy= 0.92188
  145. Iter 65280, Minibatch Loss= 0.210331, Training Accuracy= 0.89844
  146. Iter 66560, Minibatch Loss= 0.223444, Training Accuracy= 0.94531
  147. Iter 67840, Minibatch Loss= 0.278210, Training Accuracy= 0.91406
  148. Iter 69120, Minibatch Loss= 0.174290, Training Accuracy= 0.95312
  149. Iter 70400, Minibatch Loss= 0.188701, Training Accuracy= 0.94531
  150. Iter 71680, Minibatch Loss= 0.210277, Training Accuracy= 0.94531
  151. Iter 72960, Minibatch Loss= 0.249951, Training Accuracy= 0.95312
  152. Iter 74240, Minibatch Loss= 0.209853, Training Accuracy= 0.92188
  153. Iter 75520, Minibatch Loss= 0.049742, Training Accuracy= 0.99219
  154. Iter 76800, Minibatch Loss= 0.250095, Training Accuracy= 0.92969
  155. Iter 78080, Minibatch Loss= 0.133853, Training Accuracy= 0.95312
  156. Iter 79360, Minibatch Loss= 0.110206, Training Accuracy= 0.97656
  157. Iter 80640, Minibatch Loss= 0.141906, Training Accuracy= 0.93750
  158. Iter 81920, Minibatch Loss= 0.126872, Training Accuracy= 0.94531
  159. Iter 83200, Minibatch Loss= 0.138925, Training Accuracy= 0.95312
  160. Iter 84480, Minibatch Loss= 0.128652, Training Accuracy= 0.96094
  161. Iter 85760, Minibatch Loss= 0.099837, Training Accuracy= 0.96094
  162. Iter 87040, Minibatch Loss= 0.119000, Training Accuracy= 0.95312
  163. Iter 88320, Minibatch Loss= 0.179807, Training Accuracy= 0.95312
  164. Iter 89600, Minibatch Loss= 0.141792, Training Accuracy= 0.96094
  165. Iter 90880, Minibatch Loss= 0.142424, Training Accuracy= 0.96094
  166. Iter 92160, Minibatch Loss= 0.159564, Training Accuracy= 0.96094
  167. Iter 93440, Minibatch Loss= 0.111984, Training Accuracy= 0.95312
  168. Iter 94720, Minibatch Loss= 0.238978, Training Accuracy= 0.92969
  169. Iter 96000, Minibatch Loss= 0.068002, Training Accuracy= 0.97656
  170. Iter 97280, Minibatch Loss= 0.191819, Training Accuracy= 0.94531
  171. Iter 98560, Minibatch Loss= 0.081197, Training Accuracy= 0.99219
  172. Iter 99840, Minibatch Loss= 0.206797, Training Accuracy= 0.95312
  173. Optimization Finished!
  174. Testing Accuracy: 0.941406
  175. '''
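Most of the RNN function above is data plumbing: it turns a (batch_size, 28, 28) batch into a Python list of 28 per-step tensors, which the old rnn.rnn helper expects. A NumPy sketch of that reshaping (shapes only, no LSTM math; illustrative, not part of the original example):

    import numpy as np

    batch_size, n_steps, n_input, n_hidden = 128, 28, 28, 128
    X = np.random.rand(batch_size, n_steps, n_input)
    W_hidden = np.random.randn(n_input, n_hidden)
    b_hidden = np.random.randn(n_hidden)

    X = np.transpose(X, (1, 0, 2))          # (n_steps, batch_size, n_input)
    X = X.reshape(-1, n_input)              # (n_steps*batch_size, n_input)
    X = X.dot(W_hidden) + b_hidden          # linear activation into the hidden size
    steps = np.split(X, n_steps, axis=0)    # list of n_steps arrays of shape (batch_size, n_hidden)

    print(len(steps), steps[0].shape)       # 28 (128, 128)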

1.9 TensorFlow Basic Multi-GPU Computation

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

This tutorial requires your machine to have two GPUs.

  • "/cpu:0": your machine's CPU
  • "/gpu:0": your machine's first GPU
  • "/gpu:1": your machine's second GPU
  • For this example, we use two GTX-980 cards
  1. import numpy as np
  2. import tensorflow as tf
  3. import datetime
  4. # 处理器的日志
  5. log_device_placement = True
  6. # 需要执行的乘法数量
  7. n = 10
  8. # 示例:在两个 GPU 上计算 A^n + B^n
  9. # 创建随机的大型矩阵
  10. A = np.random.rand(1e4, 1e4).astype('float32')
  11. B = np.random.rand(1e4, 1e4).astype('float32')
  12. # 创建图来储存结果
  13. c1 = []
  14. c2 = []
  15. # 定义矩阵的幂
  16. def matpow(M, n):
  17. if n < 1: # n < 1 的抽象情况
  18. return M
  19. else:
  20. return tf.matmul(M, matpow(M, n-1))
  21. # 单 GPU 计算
  22. with tf.device('/gpu:0'):
  23. a = tf.constant(A)
  24. b = tf.constant(B)
  25. # 计算 A^n 和 B^n 并在 c1 中储存结果
  26. c1.append(matpow(a, n))
  27. c1.append(matpow(b, n))
  28. with tf.device('/cpu:0'):
  29. sum = tf.add_n(c1) # c1 中所有元素的和,也就是 A^n + B^n
  30. t1_1 = datetime.datetime.now()
  31. with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
  32. # 运行操作
  33. sess.run(sum)
  34. t2_1 = datetime.datetime.now()
  35. # 多 GPU 计算
  36. # GPU:0 计算 A^n
  37. with tf.device('/gpu:0'):
  38. # 计算 A^n 并在 c2 中储存结果
  39. a = tf.constant(A)
  40. c2.append(matpow(a, n))
  41. #GPU:1 计算 B^n
  42. with tf.device('/gpu:1'):
  43. # 计算 B^n 并在 c2 中储存结果
  44. b = tf.constant(B)
  45. c2.append(matpow(b, n))
  46. with tf.device('/cpu:0'):
  47. sum = tf.add_n(c2) # c2 中所有元素的和,也就是 A^n + B^n
  48. t1_2 = datetime.datetime.now()
  49. with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
  50. # 运行操作
  51. sess.run(sum)
  52. t2_2 = datetime.datetime.now()
  53. print "Single GPU computation time: " + str(t2_1-t1_1)
  54. print "Multi GPU computation time: " + str(t2_2-t1_2)
  55. '''
  56. Single GPU computation time: 0:00:11.833497
  57. Multi GPU computation time: 0:00:07.085913
  58. '''
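matpow just multiplies M by itself n times; the interesting part of the example is where each copy of that computation is placed. The sketch below (NumPy on the CPU, illustrative only, with much smaller matrices so it finishes quickly) shows the same A^n + B^n computation without any device placement. Note that recent NumPy versions require integer sizes, so the 1e4 above would need to be written as 10000:

    import numpy as np
    import datetime

    def matpow(M, n):
        # M^n by repeated matrix multiplication, mirroring the recursive TF version above
        result = M
        for _ in range(n - 1):
            result = result.dot(M)
        return result

    n = 10
    A = np.random.rand(1000, 1000).astype('float32')  # far smaller than the 10000x10000 TF example
    B = np.random.rand(1000, 1000).astype('float32')

    t1 = datetime.datetime.now()
    s = matpow(A, n) + matpow(B, n)
    t2 = datetime.datetime.now()
    print("CPU computation time: " + str(t2 - t1))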

1.10 TensorFlow Graph Visualization

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. import tensorflow as tf
  2. import numpy
  3. # Import MNIST data
  4. import input_data
  5. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  6. '''
  7. Extracting /tmp/data/train-images-idx3-ubyte.gz
  8. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  9. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  10. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  11. '''
  12. # 使用来自之前示例的 Logistic 回归
  13. # 参数
  14. learning_rate = 0.01
  15. training_epochs = 10
  16. batch_size = 100
  17. display_step = 1
  18. # TF 图输入
  19. x = tf.placeholder("float", [None, 784], name='x') # mnist 数据图像,形状为 28*28=784
  20. y = tf.placeholder("float", [None, 10], name='y') # 0-9 数字识别 => 10 个类
  21. # 创建模型
  22. # 设置模型权重
  23. W = tf.Variable(tf.zeros([784, 10]), name="weights")
  24. b = tf.Variable(tf.zeros([10]), name="bias")
  25. # 构造模型
  26. activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
  27. # 最小化交叉熵误差
  28. cost = -tf.reduce_sum(y*tf.log(activation)) # 交叉熵
  29. optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # 梯度下降
  30. # 初始化变量
  31. init = tf.initialize_all_variables()
  32. # 加载图
  33. with tf.Session() as sess:
  34. sess.run(init)
  35. # 将日志写入器设为文件夹 '/tmp/tensorflow_logs'
  36. summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
  37. # 训练循环
  38. for epoch in range(training_epochs):
  39. avg_cost = 0.
  40. total_batch = int(mnist.train.num_examples/batch_size)
  41. # 遍历所有批量
  42. for i in range(total_batch):
  43. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  44. # 使用批量数据拟合训练
  45. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
  46. # 计算平均损失
  47. avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
  48. # 展示每一步的日志
  49. if epoch % display_step == 0:
  50. print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
  51. print "Optimization Finished!"
  52. # 测试模型
  53. correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
  54. # 计算准确率
  55. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  56. print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})

Run from the command line

  1. tensorboard --logdir=/tmp/tensorflow_logs

Open http://localhost:6006/ in your browser.

  1. # Graph visualization
  2. # TensorFlow makes it easy to visualize the whole computation graph
  3. # You can click on any part of the graph to get more details

Figure 2: the computation graph in TensorBoard

  1. # Weights detail

Figure 3: weights detail

  1. # Gradient descent detail

Figure 4: gradient descent detail
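If TensorBoard shows an empty page, the usual cause is that no event file was written to the log directory. TensorFlow's summary writer creates files named events.out.tfevents.* there; a quick check before launching tensorboard (plain Python, illustrative only):

    import os

    logdir = '/tmp/tensorflow_logs'
    # Expect at least one "events.out.tfevents..." file written by the summary writer
    print([f for f in os.listdir(logdir) if f.startswith('events')])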

1.11 TensorFlow Loss Visualization

Credits: Derived from Aymeric Damien's TensorFlow Examples

Setup

Refer to the setup instructions

  1. import tensorflow as tf
  2. import numpy
  3. # Import MNIST data
  4. import input_data
  5. mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
  6. '''
  7. Extracting /tmp/data/train-images-idx3-ubyte.gz
  8. Extracting /tmp/data/train-labels-idx1-ubyte.gz
  9. Extracting /tmp/data/t10k-images-idx3-ubyte.gz
  10. Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
  11. '''
  12. # 使用来自之前示例的 Logistic 回归
  13. # 参数
  14. learning_rate = 0.01
  15. training_epochs = 10
  16. batch_size = 100
  17. display_step = 1
  18. # TF 图输入
  19. x = tf.placeholder("float", [None, 784], name='x') # mnist 数据图像,形状为 28*28=784
  20. y = tf.placeholder("float", [None, 10], name='y') # 0-9 数字识别 => 10 个类
  21. # 创建模型
  22. # 设置模型权重
  23. W = tf.Variable(tf.zeros([784, 10]), name="weights")
  24. b = tf.Variable(tf.zeros([10]), name="bias")
  25. # 构造模型
  26. activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
  27. # 最小化交叉熵误差
  28. cost = -tf.reduce_sum(y*tf.log(activation)) # 交叉熵
  29. optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # 梯度下降
  30. # 初始化变量
  31. init = tf.initialize_all_variables()
  32. # 创建汇总来监控损失函数
  33. tf.scalar_summary("loss", cost)
  34. # 将所有汇总合并为一个操作
  35. merged_summary_op = tf.merge_all_summaries()
  36. # 加载图
  37. with tf.Session() as sess:
  38. sess.run(init)
  39. # 将日志写入器设为文件夹 '/tmp/tensorflow_logs'
  40. summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
  41. # 训练循环
  42. for epoch in range(training_epochs):
  43. avg_cost = 0.
  44. total_batch = int(mnist.train.num_examples/batch_size)
  45. # 遍历所有批量
  46. for i in range(total_batch):
  47. batch_xs, batch_ys = mnist.train.next_batch(batch_size)
  48. # 使用批量数据拟合训练
  49. sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
  50. # 计算平均损失
  51. avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
  52. # 在每个迭代中写日志
  53. summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
  54. summary_writer.add_summary(summary_str, epoch*total_batch + i)
  55. # 展示每一步的日志
  56. if epoch % display_step == 0:
  57. print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
  58. print "Optimization Finished!"
  59. # 测试模型
  60. correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
  61. # 计算准确率
  62. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  63. print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})

Run from the command line

  1. tensorboard --logdir=/tmp/tensorflow_logs

Open http://localhost:6006/ in your browser.

  1. # Loss per minibatch step

Figure 5: loss per minibatch step
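tf.scalar_summary, tf.merge_all_summaries and tf.train.SummaryWriter above come from the old TensorFlow 0.x API. As a rough modern equivalent (an assumption: a TensorFlow 2.x installation; not part of the original example), per-step scalars can be logged with the tf.summary module and viewed with the same tensorboard command:

    import tensorflow as tf  # assumes TensorFlow 2.x

    writer = tf.summary.create_file_writer('/tmp/tensorflow_logs')
    with writer.as_default():
        for step in range(100):
            fake_loss = 1.0 / (step + 1)                   # stand-in for the real cost value
            tf.summary.scalar('loss', fake_loss, step=step)
        writer.flush()
    # Then run: tensorboard --logdir=/tmp/tensorflow_logs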