tensorflow中使用tf.GradientTape来记录正向运算过程(要求参与运算的数据都是浮点型),然后反向播放磁带即可自动得到梯度值。

1、利用梯度磁带求导数

  1. # f(x) = a*x**2 + b*x + c的导数
  2. x = tf.Variable(0.0, name="x", dtype=tf.float32)
  3. a = tf.constant(1.0)
  4. b = tf.constant(-2.0)
  5. c = tf.constant(1.0)
  6. with tf.GradientTape() as tape:
  7. y = a*tf.pow(x, 2) + b*x + c
  8. dy_dx = tape.gradient(y,x)
  9. dy_dx
  10. # 求函数的二阶导数
  11. with tf.GradientTape(persistent=True) as tape2:
  12. with tf.GradientTape() as tape1:
  13. y = a*tf.pow(x,2) + b*x + c
  14. dy_dx = tape1.gradient(y,x)
  15. dy2_dx2= tape2.gradient(dy_dx,x)
  16. dy2_dx2
  1. # 对常量张量也可以求导,需要增加watch
  2. with tf.GradientTape() as tape:
  3. tape.watch([a,b,c])
  4. y = a*tf.pow(x,2) + b*x + c
  5. dy_dx,dy_da,dy_db,dy_dc = tape.gradient(y,[x,a,b,c])
  6. print(dy_da)
  7. print(dy_dc)

2、利用梯度磁带和优化器求最小值

  1. # 求f(x) = a*x**2 + b*x + c的最小值
  2. # 使用optimizer.apply_gradients
  3. x = tf.Variable(0.0,name = "x",dtype = tf.float32)
  4. a = tf.constant(1.0)
  5. b = tf.constant(-2.0)
  6. c = tf.constant(1.0)
  7. optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
  8. for _ in range(1000):
  9. with tf.GradientTape() as tape:
  10. y = a*tf.pow(x,2) + b*x + c
  11. dy_dx = tape.gradient(y,x)
  12. optimizer.apply_gradients(grads_and_vars=[(dy_dx, x)])