    import matplotlib.pyplot as plt

    # prepare the training set
    x_data = [1.0, 2.0, 3.0]
    y_data = [2.0, 4.0, 6.0]

    # initial guess of the weight
    w = 1.0

    # define the model: linear model y = w * x
    def forward(x):
        return x * w

    # define the cost function: MSE over the whole training set
    def cost(xs, ys):
        cost = 0
        for x, y in zip(xs, ys):
            y_pred = forward(x)
            cost += (y_pred - y) ** 2
        return cost / len(xs)

    # define the gradient function for (batch) gradient descent
    def gradient(xs, ys):
        grad = 0
        for x, y in zip(xs, ys):
            grad += 2 * x * (x * w - y)
        return grad / len(xs)

    epoch_list = []
    cost_list = []
    print('predict (before training)', 4, forward(4))
    for epoch in range(100):
        cost_val = cost(x_data, y_data)
        grad_val = gradient(x_data, y_data)
        w -= 0.01 * grad_val  # 0.01 is the learning rate
        print('epoch:', epoch, 'w=', w, 'loss=', cost_val)
        epoch_list.append(epoch)
        cost_list.append(cost_val)
    print('predict (after training)', 4, forward(4))

    plt.plot(epoch_list, cost_list)
    plt.ylabel('cost')
    plt.xlabel('epoch')
    plt.show()
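For reference, this is the update rule the listing above implements: with the MSE cost over the N training samples, the derivative with respect to w (what gradient() computes) drives the update step

    \mathrm{cost}(w) = \frac{1}{N}\sum_{n=1}^{N}\left(w x_n - y_n\right)^2,
    \qquad
    \frac{\partial\,\mathrm{cost}}{\partial w} = \frac{1}{N}\sum_{n=1}^{N} 2 x_n \left(w x_n - y_n\right),
    \qquad
    w \leftarrow w - \alpha\,\frac{\partial\,\mathrm{cost}}{\partial w}

with learning rate alpha = 0.01 in the code. The same batch-gradient-descent loop, vectorized with NumPy: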
    # NumPy
    import numpy
    # For plotting
    import matplotlib.pyplot as plt

    def forward(w, x):
        return x * w

    # MSE cost over the whole training set, computed with array operations
    def cost(x_cor, y_cor, w):
        y_hat = forward(w, x_cor)
        loss = (y_hat - y_cor) ** 2
        return loss.sum() / len(x_cor)

    # batch gradient, computed with NumPy broadcasting
    def gradient(x_cor, y_cor, w):
        grad = 2 * x_cor * (w * x_cor - y_cor)
        return grad.sum() / len(x_cor)

    x_data = numpy.array([1.0, 2.0, 3.0])
    y_data = numpy.array([2.0, 4.0, 6.0])
    num_epochs = 100
    lr = 0.01
    w_train = numpy.array([1.0])

    epoch_cor = []
    loss_cor = []
    for epoch in range(num_epochs):
        mse_loss = cost(x_data, y_data, w_train)
        loss_cor.append(mse_loss)
        w_train -= lr * gradient(x_data, y_data, w_train)
        epoch_cor.append(epoch + 1)

    plt.figure()
    plt.plot(epoch_cor, loss_cor, c='b')
    plt.xlabel('Epoch')
    plt.ylabel('Cost')
    plt.show()


    import matplotlib.pyplot as plt

    x_data = [1.0, 2.0, 3.0]
    y_data = [2.0, 4.0, 6.0]
    w = 1.0

    def forward(x):
        return x * w

    # loss for a single training sample
    def loss(x, y):
        y_pred = forward(x)
        return (y_pred - y) ** 2

    # gradient of the single-sample loss, used for SGD
    def gradient(x, y):
        return 2 * x * (x * w - y)

    epoch_list = []
    loss_list = []
    print('predict (before training)', 4, forward(4))
    for epoch in range(100):
        for x, y in zip(x_data, y_data):
            grad = gradient(x, y)
            w = w - 0.01 * grad  # update the weight after every training sample
            print("\tgrad:", x, y, grad)
            l = loss(x, y)
        print("progress:", epoch, "w=", w, "loss=", l)
        epoch_list.append(epoch)
        loss_list.append(l)
    print('predict (after training)', 4, forward(4))

    plt.plot(epoch_list, loss_list)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.show()

Stochastic gradient descent has been shown to be effective for neural networks. It is less efficient (higher time complexity), but its learning performance is better.

The main differences between stochastic gradient descent and batch gradient descent are:

1. The loss function changes from cost() to loss(). cost() computes the loss over all of the training data, while loss() computes the loss of a single training sample; in the source code this corresponds to dropping the two for loops (the per-sample loops inside cost() and gradient()).
2. The gradient function gradient() changes from computing the gradient over all of the training data to computing the gradient of a single training sample.
3. "Stochastic" here means that each step trains on one sample and then updates the weight. In this algorithm the weight is updated 100 (epochs) × 3 (samples) = 300 times in total, whereas in batch gradient descent it is updated only 100 times.

Combining gradient descent and stochastic gradient descent gives a compromise: mini-batch gradient descent, which updates the weight once per small batch of samples, as sketched below.
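A minimal sketch of mini-batch gradient descent on the same toy data. The batch size of 2 and the per-epoch shuffling are illustrative choices, not taken from the listings above:

    import numpy as np

    x_data = np.array([1.0, 2.0, 3.0])
    y_data = np.array([2.0, 4.0, 6.0])
    w = 1.0
    lr = 0.01
    batch_size = 2  # illustrative choice: anything between 1 (SGD) and N (batch GD)

    for epoch in range(100):
        # shuffle the sample order each epoch, a common choice for mini-batch training
        perm = np.random.permutation(len(x_data))
        for start in range(0, len(x_data), batch_size):
            idx = perm[start:start + batch_size]
            xb, yb = x_data[idx], y_data[idx]
            # gradient of the MSE averaged over this mini-batch only
            grad = (2 * xb * (w * xb - yb)).mean()
            w -= lr * grad
    print('w after mini-batch training:', w)  # should approach 2.0, since y = 2x

With 3 samples and a batch size of 2, each epoch performs 2 updates, so the weight is updated 200 times here, between the 100 of batch gradient descent and the 300 of SGD.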