from collections import Counter
from math import log
import operator
    3. def createDataSet():
    4. dataSet = [[0, 0, 0, 0, 'N'],
    5. [0, 0, 0, 1, 'N'],
    6. [1, 0, 0, 0, 'Y'],
    7. [2, 1, 0, 0, 'Y'],
    8. [2, 2, 1, 0, 'Y'],
    9. [2, 2, 1, 1, 'N'],
    10. [1, 2, 1, 1, 'Y']]
    11. labels = ['outlook', 'temperature', 'humidity', 'windy']
    12. return dataSet, labels
    13. def calcShannonEnt(dataSet): # 计算熵
    14. numEntries = len(dataSet)
    15. labelCounts = {}
    16. for featVec in dataSet:
    17. currentLabel = featVec[-1]
    18. if currentLabel not in labelCounts.keys():
    19. labelCounts[currentLabel] = 0
    20. labelCounts[currentLabel] += 1 # 数每一类各多少个, {'Y': 4, 'N': 3}
    21. shannonEnt = 0.0
    22. for key in labelCounts:
    23. prob = float(labelCounts[key]) / numEntries
    24. shannonEnt -= prob * log(prob, 2)
    25. return shannonEnt
    26. def chooseBestFeatureToSplit(dataSet):
    27. numFeatures = len(dataSet[0]) - 1 # feature个数
    28. baseEntropy = calcShannonEnt(dataSet) # 整个dataset的熵
    29. bestInfoGainRatio = 0.0
    30. bestFeature = -1
    31. for i in range(numFeatures):
    32. featList = [example[i] for example in dataSet] # 每个feature的list
    33. uniqueVals = set(featList) # 每个list的唯一值集合
    34. newEntropy = 0.0
    35. splitInfo = 0.0
    36. for value in uniqueVals:
    37. subDataSet = splitDataSet(dataSet, i, value) # 每个唯一值对应的剩余feature的组成子集
    38. prob = len(subDataSet) / float(len(dataSet))
    39. newEntropy += prob * calcShannonEnt(subDataSet)
    40. splitInfo += -prob * log(prob, 2)
    41. infoGain = baseEntropy - newEntropy # 这个feature的infoGain
    42. if (splitInfo == 0): # fix the overflow bug
    43. continue
    44. infoGainRatio = infoGain / splitInfo # 这个feature的infoGainRatio增益率
    45. if (infoGainRatio > bestInfoGainRatio): # 选择最大的gain ratio
    46. bestInfoGainRatio = infoGainRatio
    47. bestFeature = i # 选择最大的gain ratio对应的feature
    48. return bestFeature
    49. def splitDataSet(dataSet, axis, value):
    50. retDataSet = []
    51. for featVec in dataSet:
    52. if featVec[axis] == value: # 只看当第i列的值=value时的item
    53. reduceFeatVec = featVec[:axis] # featVec的第i列给除去
    54. reduceFeatVec.extend(featVec[axis + 1:])
    55. retDataSet.append(reduceFeatVec)
    56. return retDataSet
    57. def createTree(dataSet, labels):
    58. classList = [example[-1] for example in dataSet] # ['N', 'N', 'Y', 'Y', 'Y', 'N', 'Y']
    59. if classList.count(classList[0]) == len(classList):
    60. # classList所有元素都相等,即类别完全相同,停止划分
    61. return classList[0] # splitDataSet(dataSet, 0, 0)此时全是N,返回N
    62. if len(dataSet[0]) == 1: # [0, 0, 0, 0, 'N']
    63. # 遍历完所有特征时返回出现次数最多的
    64. return majorityCnt(classList)
    65. bestFeat = chooseBestFeatureToSplit(dataSet) # 0-> 2
    66. # 选择最大的gain ratio对应的feature
    67. bestFeatLabel = labels[bestFeat] # outlook -> windy
    68. myTree = {bestFeatLabel: {}}
    69. # 多重字典构建树{'outlook': {0: 'N'
    70. del (labels[bestFeat]) # ['temperature', 'humidity', 'windy'] -> ['temperature', 'humidity']
    71. featValues = [example[bestFeat] for example in dataSet] # [0, 0, 1, 2, 2, 2, 1]
    72. uniqueVals = set(featValues)
    73. for value in uniqueVals:
    74. subLabels = labels[:] # ['temperature', 'humidity', 'windy']
    75. myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    76. # 划分数据,为下一层计算准备
    77. return myTree
    78. def majorityCnt(classList): # 如果属性完全相同,却不具有相同的类别,则采用少数服从多数的原则进行划分
    79. classCount = {}
    80. for vote in classList:
    81. if vote not in classCount.keys():
    82. classCount[vote] = 0
    83. else:
    84. classCount[vote] += 1
    85. sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    86. return sortedClassCount[0][0]

# Source: https://github.com/cdqncn/JueCeShu/blob/master/myTree.py