1 Mnist数据集

2.1 简介

提供6W张28*28像素点的0~9手写数字图片和标签,用于训练
提供1W张28*28像素点的0~9手写数字图片和标签,用于测试
每张图片的784个像素点(28*28=784)组成长度为784的一维数组,作为输入特征。
图片的标签以一维数组形式给出,每个元素表示对应分类出现的概率。

2.2 常用函数

(1)从集合中取出全部变量,生成一个列表

  1. tf.get_collection("")

(2)列表对应元素相加

  1. tf.add_n([])

(3)把x转为dtype类型

  1. tf.cast(x,dtype)

(4)返回最大值索引号,如tf.argmax([1,0,0])返回0

  1. tf.argmax(x,axis)

(5)返回home/name

  1. os.path.join("home","name")

(6)其内定义的节点在计算图中

  1. with tf.Graph().as_default() as g:

(7)按指定拆分符对字符串切片,返回分割后的列表。如'./model/mnist_model-1001'.split('-')[-1]返回'1001'

  1. 字符串.split()

(8)保存模型

  1. saver = tf.train.Saver()
  2. with tf.Session() as sess:
  3. ...
  4. #将当前会话加载到指定路径
  5. saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

(9)加载模型

  1. with tf.Session() as sess:
  2. ckpt = tf.train.get_checkpoint_state(存储路径)
  3. # 若模型存在,则加载出模型到当前对话,在测试数据集上进行准确率验证,并打印出当前轮数下的准确率
  4. if ckpt and ckpt.model_checkpoint_path:
  5. saver.restore(sess, ckpt.model_checkpoint_path)

(10)实例化可还原平均值的saver

  1. # 实例化具有滑动平均的saver对象,从而在会话被加载时模型中的所有参数被赋值为各自的滑动平均值,增强模型的稳定性
  2. ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
  3. ema_restore = ema.variables_to_restore()
  4. saver = tf.train.Saver(ema_restore)

(11)准确率计算方法

  1. # 计算模型在测试集上的准确率
  2. correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  3. accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

(12)断点续训
表示如果程序中断,下次训练从中断的位置继续训练模型,而不是从头开始训练

  1. ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
  2. # 若模型存在,则加载出模型到当前对话,在测试数据集上进行准确率验证,并打印出当前轮数下的准确率
  3. if ckpt and ckpt.model_checkpoint_path:
  4. saver.restore(sess, ckpt.model_checkpoint_path)

2 代码实现

2.1 代码结构

(1)forward.py

  1. # 定义前向传播过程
  2. def forward(x,regularizer):
  3. w =
  4. b =
  5. y =
  6. return y
  7. # 给权重赋初值
  8. def get_weight(shape, regularizer):
  9. # 给偏置赋初值
  10. def get_bias(shape):

(2)backward.py

  1. def backward(mnist):
  2. x =
  3. y_=
  4. y =
  5. global_step =
  6. loss =
  7. <正则化、指数衰减学习率、滑动平均>
  8. train_step =
  9. 实例化saver
  10. with tf.Session() as sess:
  11. 初始化
  12. for i in range(STEPS):
  13. sess.run(train_step,feed_dict = {x:,y_:})
  14. if i %轮数 ==0:
  15. print
  16. saver.save()

其中损失函数loss包含正则化regularization
backward.py中加入

  1. ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels =tf.argmax(y_,1))
  2. cem =tf.reduce_mean(ce)
  3. #loss = cem + tf.add_n(tf.get_collection('losses'))# 正则化的loss

forward.py中加入

  1. if regularizer != None:tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))

学习率learning_rate
backward.py中加入

  1. ## 指数衰减学习率
  2. learning_rate = tf.train.exponential_decay(
  3. LEARNING_RATE_BASE,
  4. global_step,
  5. #数据集总样本数/Batch_size
  6. LEARNING_RATE_DECAY,
  7. staircase =True)

滑动平均ema
backward.py中加入

  1. ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)#滑动平均
  2. ema_op = ema.apply(tf.trainable_variables())
  3. with tf.control_dependencies([train_step,ema_op]):
  4. train_op = tf.no_op(name='train')

(3)test.py

  1. def test(mnist):
  2. with tf.Graph().as_default() as g:
  3. 定义x y_ y
  4. 实例化可还原滑动平均值的saver
  5. 计算正确率
  6. while True:
  7. with tf.Session() as sess:
  8. 加载ckpt模型ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
  9. 如果已经有ckpt模型则恢复if ckpt and ckpt.model_checkpoint_path:
  10. 恢复会话saver.restore(sess, ckpt.model_checkpoint_path)
  11. 恢复轮数 global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
  12. 计算准确率accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
  13. 打印提示 print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
  14. 如果没有模型else
  15. 给出提示 print('No checkpoint file found')
  16. return
  17. def main():
  18. mnist = input_data.read_data_sets("./data/",one_hot=True)
  19. test(mnist)
  20. if __name__=='__main__':
  21. main()

2.2 完整代码

(1)mnist_forward.py

  1. # 1前向传播过程
  2. import tensorflow as tf
  3. # 网络输入节点为784个(代表每张输入图片的像素个数)
  4. INPUT_NODE = 784
  5. # 输出节点为10个(表示输出为数字0-9的十分类)
  6. OUTPUT_NODE = 10
  7. # 隐藏层节点500个
  8. LAYER1_NODE = 500
  9. def get_weight(shape, regularizer):
  10. # 参数满足截断正态分布,并使用正则化,
  11. w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
  12. # w = tf.Variable(tf.random_normal(shape,stddev=0.1))
  13. # 将每个参数的正则化损失加到总损失中
  14. if regularizer != None: tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
  15. return w
  16. def get_bias(shape):
  17. # 初始化的一维数组,初始化值为全 0
  18. b = tf.Variable(tf.zeros(shape))
  19. return b
  20. def forward(x, regularizer):
  21. # 由输入层到隐藏层的参数w1形状为[784,500]
  22. w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
  23. # 由输入层到隐藏的偏置b1形状为长度500的一维数组,
  24. b1 = get_bias([LAYER1_NODE])
  25. # 前向传播结构第一层为输入 x与参数 w1矩阵相乘加上偏置 b1 ,再经过relu函数 ,得到隐藏层输出 y1。
  26. y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
  27. # 由隐藏层到输出层的参数w2形状为[500,10]
  28. w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
  29. # 由隐藏层到输出的偏置b2形状为长度10的一维数组
  30. b2 = get_bias([OUTPUT_NODE])
  31. # 前向传播结构第二层为隐藏输出 y1与参 数 w2 矩阵相乘加上偏置 矩阵相乘加上偏置 b2,得到输出 y。
  32. # 由于输出 。由于输出 y要经过softmax oftmax 函数,使其符合概率分布,故输出y不经过 relu函数
  33. y = tf.matmul(y1, w2) + b2
  34. return y

(2)mnist_backward.py

  1. #2反向传播过程
  2. #引入tensorflow、input_data、前向传播mnist_forward和os模块
  3. import tensorflow as tf
  4. from tensorflow.examples.tutorials.mnist import input_data
  5. import mnist_forward
  6. import os
  7. #每轮喂入神经网络的图片数
  8. BATCH_SIZE = 200
  9. #初始学习率
  10. LEARNING_RATE_BASE = 0.1
  11. #学习率衰减率
  12. LEARNING_RATE_DECAY = 0.99
  13. #正则化系数
  14. REGULARIZER = 0.0001
  15. #训练轮数
  16. STEPS = 50000
  17. #滑动平均衰减率
  18. MOVING_AVERAGE_DECAY = 0.99
  19. #模型保存路径
  20. MODEL_SAVE_PATH="./model/"
  21. #模型保存名称
  22. MODEL_NAME="mnist_model"
  23. def backward(mnist):
  24. #用placeholder给训练数据x和标签y_占位
  25. x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
  26. y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
  27. #调用mnist_forward文件中的前向传播过程forword()函数,并设置正则化,计算训练数据集上的预测结果y
  28. y = mnist_forward.forward(x, REGULARIZER)
  29. #当前计算轮数计数器赋值,设定为不可训练类型
  30. global_step = tf.Variable(0, trainable=False)
  31. #调用包含所有参数正则化损失的损失函数loss
  32. ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
  33. cem = tf.reduce_mean(ce)
  34. loss = cem + tf.add_n(tf.get_collection('losses'))
  35. #设定指数衰减学习率learning_rate
  36. learning_rate = tf.train.exponential_decay(
  37. LEARNING_RATE_BASE,
  38. global_step,
  39. mnist.train.num_examples / BATCH_SIZE,
  40. LEARNING_RATE_DECAY,
  41. staircase=True)
  42. #使用梯度衰减算法对模型优化,降低损失函数
  43. #train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
  44. train_step = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(loss, global_step=global_step)
  45. #train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
  46. #定义参数的滑动平均
  47. ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
  48. ema_op = ema.apply(tf.trainable_variables())
  49. #实例化可还原滑动平均的saver
  50. #在模型训练时引入滑动平均可以使模型在测试数据上表现的更加健壮
  51. with tf.control_dependencies([train_step,ema_op]):
  52. train_op = tf.no_op(name='train')
  53. saver = tf.train.Saver()
  54. with tf.Session() as sess:
  55. #所有参数初始化
  56. init_op = tf.global_variables_initializer()
  57. sess.run(init_op)
  58. #每次喂入batch_size组(即200组)训练数据和对应标签,循环迭代steps轮
  59. for i in range(STEPS):
  60. xs, ys = mnist.train.next_batch(BATCH_SIZE)
  61. _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
  62. if i % 1000 == 0:
  63. print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
  64. #将当前会话加载到指定路径
  65. saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
  66. def main():
  67. #读入mnist
  68. mnist = input_data.read_data_sets("./data/", one_hot=True)
  69. #反向传播
  70. backward(mnist)
  71. if __name__ == '__main__':
  72. main()

(3)mnist_test.py

  1. # 验证网络的准确性和泛化性
  2. import time
  3. import tensorflow as tf
  4. from tensorflow.examples.tutorials.mnist import input_data
  5. import mnist_forward
  6. import mnist_backward
  7. # 程序5秒的循环间隔时间
  8. TEST_INTERVAL_SECS = 5
  9. def test(mnist):
  10. # 利用tf.Graph()复现之前定义的计算图
  11. with tf.Graph().as_default() as g:
  12. # 利用placeholder给训练数据x和标签y_占位
  13. x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
  14. y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
  15. # 调用mnist_forward文件中的前向传播过程forword()函数
  16. y = mnist_forward.forward(x, None)
  17. # 实例化具有滑动平均的saver对象,从而在会话被加载时模型中的所有参数被赋值为各自的滑动平均值,增强模型的稳定性
  18. ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
  19. ema_restore = ema.variables_to_restore()
  20. saver = tf.train.Saver(ema_restore)
  21. # 计算模型在测试集上的准确率
  22. correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  23. accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  24. while True:
  25. with tf.Session() as sess:
  26. # 加载指定路径下的ckpt
  27. ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
  28. # 若模型存在,则加载出模型到当前对话,在测试数据集上进行准确率验证,并打印出当前轮数下的准确率
  29. if ckpt and ckpt.model_checkpoint_path:
  30. saver.restore(sess, ckpt.model_checkpoint_path)
  31. global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
  32. accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
  33. print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
  34. # 若模型不存在,则打印出模型不存在的提示,从而test()函数完成
  35. else:
  36. print('No checkpoint file found')
  37. return
  38. time.sleep(TEST_INTERVAL_SECS)
  39. def main():
  40. # 加载指定路径下的测试数据集
  41. mnist = input_data.read_data_sets("./data/", one_hot=True)
  42. test(mnist)
  43. if __name__ == '__main__':
  44. main()