from math import log
import operator
def calcShannonEnt(dataSet):
    """
    Compute the Shannon entropy of a dataset.
    """
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:  # iterate over every sample in the dataset
        currentLabel = featVec[-1]  # the class label is the last column
        if currentLabel not in labelCounts:  # first time we see this label
            labelCounts[currentLabel] = 0  # initialize its counter
        labelCounts[currentLabel] += 1  # count samples per label
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries  # probability of each class
        shannonEnt -= prob * log(prob, 2)  # Shannon entropy formula (p. 35 of the book)
    return shannonEnt
def createDataSet():
    """
    Create a small mock dataset.
    """
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels
# test calcShannonEnt
dataset, labels = createDataSet()
print(calcShannonEnt(dataset))  # 0.9709505944546686
dataset[0][-1] = 'maybe'  # introduce a third class; the data becomes more mixed
print(calcShannonEnt(dataset))  # 1.3709505944546687
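# A quick hand check of the first value (a minimal sketch added here for
# illustration; p_yes and p_no are hypothetical names, not part of the original
# code): with 2 'yes' and 3 'no' samples out of 5,
# H = -(2/5)*log2(2/5) - (3/5)*log2(3/5), matching calcShannonEnt.
p_yes, p_no = 2 / 5, 3 / 5
print(-(p_yes * log(p_yes, 2) + p_no * log(p_no, 2)))  # 0.9709505944546686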
def splitDataSet(dataSet, axis, value):
    """
    Split the dataset on a given feature.
    :param dataSet: the dataset of samples
    :param axis: index (dimension) of the feature to split on
    :param value: the feature value to match
    :return: the samples whose feature equals value, with that feature removed
    For example, with feature 1 = "rich or not" and feature 2 = "handsome or not":
    splitting on feature 1 partitions the dataset into rich vs. not rich;
    splitting on feature 2 partitions it into handsome vs. not handsome.
    """
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:  # this sample's value on axis matches the given value
            # The next two lines drop the splitting feature from the sample, e.g.
            # splitting on "handsome": ['rich', 'handsome', 'tall'] --> ['rich', 'tall']
            reducedFeatVec = featVec[:axis]  # features before the split feature
            reducedFeatVec.extend(featVec[axis+1:])  # append the features after it
            retDataSet.append(reducedFeatVec)
    return retDataSet
# test splitDataSet
dataset, labels = createDataSet()
print(splitDataSet(dataset, 0, 1))
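# Expected output: [[1, 'yes'], [1, 'yes'], [0, 'no']] -- the three samples
# with feature 0 == 1, with that feature stripped out.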
def chooseBestFeatureToSplit(dataSet):
    """
    Choose the best feature to split the dataset on.
    For example, with 10 samples split on some feature whose value a1 covers 5
    samples with entropy H1, a2 covers 3 samples with entropy H2, and a3 covers
    2 samples with entropy H3, the entropy after splitting on that feature is:
    newEntropy = 5/10*H1 + 3/10*H2 + 2/10*H3
    """
    numFeatures = len(dataSet[0]) - 1  # the last column is the class label
    baseEntropy = calcShannonEnt(dataSet)  # entropy of the full dataset
    bestInfoGain = 0.0  # best information gain so far
    bestFeature = -1
    for i in range(numFeatures):  # iterate over all features
        featList = [example[i] for example in dataSet]  # all values of feature i
        uniqueVals = set(featList)  # the unique values of this feature
        newEntropy = 0.0  # entropy after splitting on feature i
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)  # split out this value's subset
            prob = len(subDataSet) / float(len(dataSet))  # the subset's share of the data
            newEntropy += prob * calcShannonEnt(subDataSet)  # weighted subset entropy
        infoGain = baseEntropy - newEntropy  # information gain of feature i
        if infoGain > bestInfoGain:  # better than the best gain so far
            bestInfoGain = infoGain  # update the best gain
            bestFeature = i
    return bestFeature  # index of the best feature to split on
# test chooseBestFeatureToSplit
dataset, labels = createDataSet()
print(chooseBestFeatureToSplit(dataset))
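# Expected output: 0. On this toy dataset feature 0 ('no surfacing') has the
# larger information gain, so it wins:
#   gain(0) = 0.9710 - (3/5 * 0.9183 + 2/5 * 0.0) ≈ 0.4200
#   gain(1) = 0.9710 - (4/5 * 1.0000 + 1/5 * 0.0) ≈ 0.1710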
def majorityCnt(classList):
    """
    Return the class name that occurs most often in classList.
    """
    classCount = {}  # frequency of each class label in classList
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    # e.g. [('flippers', 2), ('no surfacing', 1)]
    return sortedClassCount[0][0]  # e.g. 'flippers'
# test majorityCnt
labels = ['no surfacing', 'flippers', 'flippers']
print(majorityCnt(labels))
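# Expected output: 'flippers', which appears twice versus once for
# 'no surfacing'. Note that this test rebinds the name labels, so the feature
# labels from createDataSet() must be re-created before building the tree below.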
def createTree(dataSet, labels):
    """
    Build the decision tree recursively.
    """
    classList = [example[-1] for example in dataSet]  # class labels of all samples
    if classList.count(classList[0]) == len(classList):  # all samples share one class
        return classList[0]  # stop and return that class
    if len(dataSet[0]) == 1:  # all features used up: only the label column remains
        return majorityCnt(classList)  # fall back to the majority class
    bestFeat = chooseBestFeatureToSplit(dataSet)  # best feature to split on now
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])  # this feature is consumed at this node
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy labels so recursion doesn't mutate the caller's list
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
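# test createTree -- a minimal usage sketch following the pattern of the tests
# above; createTree deletes entries from labels, hence the fresh createDataSet call.
dataset, labels = createDataSet()
print(createTree(dataset, labels))
# Expected output: {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}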