1. Compute the sample weights

Every sample in the training data is assigned a weight; together these weights form the sample-weight vector D, and they are all initialized to the same value. Given a training set of n samples

$$T = \{(x_1, y_1), (x_2, y_2), \dots, (x_n, y_n)\},$$

each sample's initial weight is set to the same value, 1/n:

$$D_1 = (w_{1,1}, w_{1,2}, \dots, w_{1,n}), \quad w_{1,i} = \frac{1}{n}, \quad i = 1, 2, \dots, n$$
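As a minimal numeric sketch (not part of the original listings), initializing uniform weights for a five-sample training set:

```python
import numpy as np

n = 5                  # number of training samples
D = np.ones(n) / n     # each sample starts with weight 1/n
print(D)               # [0.2 0.2 0.2 0.2 0.2]
```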

2. Compute the error rate

Train the first weak learner h_1 on this data; once training finishes, compute its error rate ε. In round t, the error rate is the total weight of the misclassified samples:

$$\varepsilon_t = \sum_{i=1}^{n} w_{t,i} \, I\big(h_t(x_i) \neq y_i\big)$$

With the uniform initial weights, this is simply the fraction of samples that are misclassified.
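A small sketch of the weighted error for one round; `y_true` and `y_pred` are hypothetical label vectors introduced here for illustration, not names from the listings below:

```python
import numpy as np

D = np.ones(5) / 5                      # current sample weights
y_true = np.array([1, 1, -1, -1, 1])    # ground-truth labels (hypothetical)
y_pred = np.array([1, 1, -1, 1, 1])     # a weak learner's predictions (hypothetical)
epsilon = D[y_pred != y_true].sum()     # total weight of the misclassified samples
print(epsilon)                          # 0.2: one of five equally weighted samples is wrong
```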
3. Compute the weak learner's weight

Each weak learner also carries a weight; these are collected in the vector α, one entry per round, computed from the error rate:

$$\alpha_t = \frac{1}{2} \ln\!\left(\frac{1 - \varepsilon_t}{\varepsilon_t}\right)$$
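For intuition, a quick sketch of how α behaves: it grows as the error shrinks, and falls to 0 at ε = 0.5, where the learner is no better than a coin flip:

```python
import numpy as np

def alpha(epsilon):
    # guard against division by zero when the error is exactly 0
    return 0.5 * np.log((1.0 - epsilon) / max(epsilon, 1e-16))

print(alpha(0.2))   # ~0.693: an accurate learner gets a large vote
print(alpha(0.5))   # 0.0: a coin-flip learner gets no vote
```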
4. Update the sample weights

After a round of learning completes, the sample weights are re-adjusted so that samples misclassified in this round receive more attention in the next round:

$$w_{t+1,i} = \frac{w_{t,i}}{Z_t} \times \begin{cases} e^{-\alpha_t}, & h_t(x_i) = y_i \\ e^{\alpha_t}, & h_t(x_i) \neq y_i \end{cases}$$

Here $h_t(x_i) = y_i$ means the i-th sample was classified correctly in round t, and $h_t(x_i) \neq y_i$ means it was misclassified. $Z_t$ is a normalization factor that keeps the weights summing to 1:

$$Z_t = \sum_{i=1}^{n} w_{t,i} \, e^{-\alpha_t y_i h_t(x_i)}$$

Because $y_i, h_t(x_i) \in \{-1, +1\}$, the two cases collapse into a single expression:

$$w_{t+1,i} = \frac{w_{t,i} \, e^{-\alpha_t y_i h_t(x_i)}}{Z_t}$$
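Continuing the small sketch from step 2 (one misclassified sample, so ε = 0.2 and α ≈ 0.693), the update raises the misclassified sample's weight from 0.2 to 0.5 and shrinks the rest:

```python
import numpy as np

D = np.ones(5) / 5
y_true = np.array([1, 1, -1, -1, 1])        # hypothetical labels, as in the step-2 sketch
y_pred = np.array([1, 1, -1, 1, 1])
alpha = 0.5 * np.log((1 - 0.2) / 0.2)       # ~0.693 for epsilon = 0.2

D = D * np.exp(-alpha * y_true * y_pred)    # e^{-alpha} where correct, e^{+alpha} where wrong
D = D / D.sum()                             # dividing by Z_t makes the weights sum to 1
print(D)                                    # [0.125 0.125 0.125 0.5 0.125]
```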
5. The AdaBoost algorithm

The procedure is repeated. After T rounds of learning we have T weak learners $h_1, \dots, h_T$, their weights $\alpha_1, \dots, \alpha_T$, the combined weak-classifier output $f(x)$, and the final AdaBoost output $H(x)$:

$$f(x) = \sum_{t=1}^{T} \alpha_t h_t(x), \qquad H(x) = \operatorname{sign}\big(f(x)\big)$$
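A sketch of the final combination under assumed values: three weak learners whose signed votes are weighted, summed, and thresholded (the α values and per-round predictions here are made up for illustration):

```python
import numpy as np

alphas = [0.69, 0.97, 0.90]                  # assumed per-round weights
preds = [np.array([ 1, 1, -1, -1,  1]),      # assumed outputs of h_1, h_2, h_3 on five samples
         np.array([ 1, 1, -1, -1, -1]),
         np.array([ 1, 1,  1, -1,  1])]

f = sum(a * p for a, p in zip(alphas, preds))  # weighted sum f(x)
H = np.sign(f)                                 # final AdaBoost prediction H(x)
print(f)    # [ 2.56  2.56 -0.76 -2.56  0.62]
print(H)    # [ 1.  1. -1. -1.  1.]
```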

First, a helper that builds the simple five-point dataset used throughout this section and visualizes it:

```python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt

def loadSimpData():
    datMat = np.matrix([[1. , 2.1],
                        [1.5, 1.6],
                        [1.3, 1. ],
                        [1. , 1. ],
                        [2. , 1. ]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels

def showDataSet(dataMat, labelMat):
    data_plus = []                                      # positive samples
    data_minus = []                                     # negative samples
    for i in range(len(dataMat)):
        if labelMat[i] > 0:
            data_plus.append(dataMat[i])
        else:
            data_minus.append(dataMat[i])
    data_plus_np = np.array(data_plus)                  # convert to numpy arrays
    data_minus_np = np.array(data_minus)
    plt.scatter(np.transpose(data_plus_np)[0], np.transpose(data_plus_np)[1])    # positive scatter plot
    plt.scatter(np.transpose(data_minus_np)[0], np.transpose(data_minus_np)[1])  # negative scatter plot
    plt.show()

if __name__ == '__main__':
    dataArr, classLabels = loadSimpData()
    showDataSet(dataArr, classLabels)
```
Next, the weak learner itself. A decision stump classifies by thresholding a single feature; `buildStump` sweeps every feature, threshold, and direction, and returns the stump with the lowest weighted error under the current weight vector D:

```python
# -*- coding: utf-8 -*-
import numpy as np

def loadSimpData():
    datMat = np.matrix([[1. , 2.1],
                        [1.5, 1.6],
                        [1.3, 1. ],
                        [1. , 1. ],
                        [2. , 1. ]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels

def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    retArray = np.ones((np.shape(dataMatrix)[0], 1))        # initialize retArray to all 1s
    if threshIneq == 'lt':
        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0  # at or below the threshold: assign -1
    else:
        retArray[dataMatrix[:, dimen] > threshVal] = -1.0   # above the threshold: assign -1
    return retArray

def buildStump(dataArr, classLabels, D):
    dataMatrix = np.mat(dataArr); labelMat = np.mat(classLabels).T
    m, n = np.shape(dataMatrix)
    numSteps = 10.0; bestStump = {}; bestClasEst = np.mat(np.zeros((m, 1)))
    minError = float('inf')                                 # initialize the minimum error to +infinity
    for i in range(n):                                      # iterate over all features
        rangeMin = dataMatrix[:, i].min(); rangeMax = dataMatrix[:, i].max()  # feature's min and max
        stepSize = (rangeMax - rangeMin) / numSteps         # step size of the threshold sweep
        for j in range(-1, int(numSteps) + 1):
            for inequal in ['lt', 'gt']:                    # try both directions; lt: less than, gt: greater than
                threshVal = rangeMin + float(j) * stepSize  # candidate threshold
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)  # classify with this stump
                errArr = np.mat(np.ones((m, 1)))            # error vector, initialized to all 1s
                errArr[predictedVals == labelMat] = 0       # correctly classified samples get 0
                weightedError = float(D.T * errArr)         # weighted error
                print("split: dim %d, thresh %.2f, thresh inequal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
                if weightedError < minError:                # keep the stump with the smallest weighted error
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst

if __name__ == '__main__':
    dataArr, classLabels = loadSimpData()
    D = np.mat(np.ones((5, 1)) / 5)
    bestStump, minError, bestClasEst = buildStump(dataArr, classLabels, D)
    print('bestStump:\n', bestStump)
    print('minError:\n', minError)
    print('bestClasEst:\n', bestClasEst)
```
Finally, the full training loop, `adaBoostTrainDS` ("DS" for decision stump), which ties steps 1 through 5 together:

```python
# -*- coding: utf-8 -*-
import numpy as np

def loadSimpData():
    datMat = np.matrix([[1. , 2.1],
                        [1.5, 1.6],
                        [1.3, 1. ],
                        [1. , 1. ],
                        [2. , 1. ]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels

def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    retArray = np.ones((np.shape(dataMatrix)[0], 1))        # initialize retArray to all 1s
    if threshIneq == 'lt':
        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0  # at or below the threshold: assign -1
    else:
        retArray[dataMatrix[:, dimen] > threshVal] = -1.0   # above the threshold: assign -1
    return retArray

def buildStump(dataArr, classLabels, D):
    dataMatrix = np.mat(dataArr); labelMat = np.mat(classLabels).T
    m, n = np.shape(dataMatrix)
    numSteps = 10.0; bestStump = {}; bestClasEst = np.mat(np.zeros((m, 1)))
    minError = float('inf')                                 # initialize the minimum error to +infinity
    for i in range(n):                                      # iterate over all features
        rangeMin = dataMatrix[:, i].min(); rangeMax = dataMatrix[:, i].max()  # feature's min and max
        stepSize = (rangeMax - rangeMin) / numSteps         # step size of the threshold sweep
        for j in range(-1, int(numSteps) + 1):
            for inequal in ['lt', 'gt']:                    # try both directions; lt: less than, gt: greater than
                threshVal = rangeMin + float(j) * stepSize  # candidate threshold
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)  # classify with this stump
                errArr = np.mat(np.ones((m, 1)))            # error vector, initialized to all 1s
                errArr[predictedVals == labelMat] = 0       # correctly classified samples get 0
                weightedError = float(D.T * errArr)         # weighted error
                print("split: dim %d, thresh %.2f, thresh inequal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
                if weightedError < minError:                # keep the stump with the smallest weighted error
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst

def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    weakClassArr = []
    m = np.shape(dataArr)[0]
    D = np.mat(np.ones((m, 1)) / m)                         # step 1: initialize sample weights uniformly
    aggClassEst = np.mat(np.zeros((m, 1)))
    for i in range(numIt):
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)  # step 2: build the best stump
        print("D:", D.T)
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))    # step 3: weak learner weight alpha; max() keeps the denominator from being 0
        bestStump['alpha'] = alpha                          # store the weak learner's weight
        weakClassArr.append(bestStump)                      # store the decision stump
        print("classEst: ", classEst.T)
        expon = np.multiply(-1 * alpha * np.mat(classLabels).T, classEst) # exponent -alpha * y_i * h_t(x_i)
        D = np.multiply(D, np.exp(expon))
        D = D / D.sum()                                     # step 4: update the sample weights per the formula above
        # Step 5: accumulate the weighted votes; stop once the training error hits 0
        aggClassEst += alpha * classEst
        print("aggClassEst: ", aggClassEst.T)
        aggErrors = np.multiply(np.sign(aggClassEst) != np.mat(classLabels).T, np.ones((m, 1)))  # misclassification indicators
        errorRate = aggErrors.sum() / m
        print("total error: ", errorRate)
        if errorRate == 0.0: break                          # zero training error: stop early
    return weakClassArr, aggClassEst

if __name__ == '__main__':
    dataArr, classLabels = loadSimpData()
    weakClassArr, aggClassEst = adaBoostTrainDS(dataArr, classLabels)
    print(weakClassArr)
    print(aggClassEst)
```
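With the trained `weakClassArr` in hand, classifying new points is just the step-5 combination applied stump by stump. Here is a hedged sketch (the name `adaClassify` is an assumption, not part of the listings above; it reuses `stumpClassify` from the last listing):

```python
def adaClassify(datToClass, classifierArr):
    """Sketch: classify new samples with a trained stump ensemble.
    Assumes numpy (as np) and stumpClassify from the listing above are in scope."""
    dataMatrix = np.mat(datToClass)
    m = np.shape(dataMatrix)[0]
    aggClassEst = np.mat(np.zeros((m, 1)))
    for stump in classifierArr:             # accumulate each stump's weighted vote
        classEst = stumpClassify(dataMatrix, stump['dim'], stump['thresh'], stump['ineq'])
        aggClassEst += stump['alpha'] * classEst
    return np.sign(aggClassEst)             # threshold the weighted sum

# e.g. print(adaClassify([[0, 0], [5, 5]], weakClassArr))
```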