Machine Learning in Action (4): Naive Bayes
Classification based on probability theory: Naive Bayes (conditional probability).
Naive Bayes is a simple but remarkably powerful linear classifier that has been very successful in spam filtering and disease diagnosis. It is called "naive" because it assumes the features are mutually independent, an assumption that almost never holds in real life. Even when the assumption is violated it still tends to perform well, especially on small training sets. When the features are strongly correlated, however, or the problem is not linearly separable, Naive Bayes can classify poorly.
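In symbols: Bayes' theorem gives the posterior probability of class c for a document with words w_1, ..., w_n, and the "naive" independence assumption factorizes the likelihood. These are the standard textbook formulas, added here for reference:

P(c \mid w_1,\dots,w_n) = \frac{P(c)\,P(w_1,\dots,w_n \mid c)}{P(w_1,\dots,w_n)}, \qquad P(w_1,\dots,w_n \mid c) = \prod_{i=1}^{n} P(w_i \mid c)

The denominator is identical for every class, so classification only has to compare the numerators.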
"""
数据集
"""
from numpy import *
def loadDataSet():
    postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                 ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                 ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                 ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                 ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                 ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    # 0 = normal post, 1 = abusive post
    classVec = [0,1,0,1,0,1]
    return postingList,classVec
def createVocabList(dataSet):
    # create an empty set
    vocabSet = set([])
    # collect the words of every document into the set, without duplicates
    for document in dataSet:
        # union of the two sets
        vocabSet = vocabSet | set(document)
    return list(vocabSet)
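Before training anything it is worth checking the vocabulary. The set union makes the word order arbitrary, so it may differ between runs (a quick check, not in the original listing):

listOPosts,listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
print len(myVocabList)   # 32 distinct words, matching the length of p0V shown below
print myVocabList[:5]    # first few words; the ordering is not guaranteed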
"""
词集模型(set-of-words) 每个词出现一次
"""
def setOfWords2Vec(vocabList, inputSet):
    # create a vector whose elements are all 0
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            # mark the word as present with a 1
            returnVec[vocabList.index(word)] = 1
        else:
            print "the word: %s is not in my vocabulary!" % word
    return returnVec
"""
词袋模型(bag-of-words model) 每个词出现多次
"""
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        # every occurrence of a word increments its entry, instead of setting it to 1
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
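The difference between the two models is easiest to see on a toy example (toyVocab and toyDoc are made up for illustration, not part of the book's data):

toyVocab = ['dog', 'stupid', 'my']
toyDoc = ['stupid', 'dog', 'stupid']
print setOfWords2Vec(toyVocab, toyDoc)    # [1, 1, 0] -- presence only
print bagOfWords2VecMN(toyVocab, toyDoc)  # [1, 2, 0] -- 'stupid' counted twice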
"""
朴素贝叶斯分类器训练函数
trainMatrix:文档矩阵
trainCategory:由每篇文档类别标签所构成的向量
"""
def trainNB0(trainMatrix,trainCategory):
    # number of training documents
    numTrainDocs = len(trainMatrix)
    # number of words in the vocabulary
    numWords = len(trainMatrix[0])
    # count the 1s in trainCategory (it contains only 0s and 1s):
    # the prior probability that a document is abusive
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    # initialize the counts
    p0Num = ones(numWords)   # before the fix: zeros(numWords), an all-zero vector
    p1Num = ones(numWords)   # before the fix: zeros(numWords)
    p0Denom = 2.0            # denominator; before the fix: 0.0
    p1Denom = 2.0            # before the fix: 0.0
    # iterate over all documents in trainMatrix; whenever a word appears in a
    # document, its count (p1Num abusive / p0Num normal) is incremented, and the
    # total word count of that document's class is incremented as well
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            # vector addition
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i]) # total number of abusive-class words
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # element-wise division
    # after the fix: take the natural log of the ratios, so the later product of
    # many tiny factors does not underflow to 0
    # f(x) and ln(f(x)) increase and decrease over the same intervals and reach
    # their extrema at the same points, so although the values differ, the final
    # result is unaffected; ln(a*b) = ln(a) + ln(b)
    p1Vect = log(p1Num/p1Denom) # before the fix: p1Num/p1Denom; the log of each word's share of all abusive-class words
    p0Vect = log(p0Num/p0Denom) # before the fix: p0Num/p0Denom
    return p0Vect,p1Vect,pAbusive
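Two fixes inside trainNB0 deserve a closer look. Initializing the counts to 1 and the denominators to 2 is Laplace smoothing: P(w_i|c) = (n_i + 1)/(N + 2) instead of n_i/N, so a word never seen in a class no longer gets probability 0 and cannot zero out the whole product. The log transform prevents numeric underflow, which is easy to reproduce (a minimal sketch, not from the book):

from numpy import log
# the product of a few hundred small conditional probabilities underflows to 0.0
p = 1.0
for _ in range(300):
    p *= 0.01
print p    # 0.0 -- double precision cannot represent 0.01**300
# the equivalent sum of logs stays perfectly representable
s = 0.0
for _ in range(300):
    s += log(0.01)
print s    # about -1381.6, i.e. ln(0.01**300)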
"""
朴素贝叶斯分类函数
"""
"""
vec2Classify:待分类的向量
p0Vec, p1Vec, pClass1:使用trainNB0计算得到的3个概率
"""
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # element-wise multiplication: numpy multiplies the two vectors entry by
    # entry (first elements together, then the second elements, ...);
    # summing over the vocabulary adds up the log-probabilities of the words
    # that occur, and the class log-prior is added on top
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0-pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
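Written out, classifyNB compares the two class log-posteriors, dropping the constant \log P(w) shared by both (standard notation; w_i is the i-th entry of vec2Classify):

p_1 = \sum_i w_i \log P(\mathrm{word}_i \mid c{=}1) + \log P(c{=}1), \qquad p_0 = \sum_i w_i \log P(\mathrm{word}_i \mid c{=}0) + \log P(c{=}0)

The predicted label is 1 if p_1 > p_0 and 0 otherwise; multiplying by w_i simply zeroes out the log-probabilities of words that do not occur in the document.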
def testingNB():
    listOPosts,listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry,'Classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
    testEntry = ['stupid','garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry,'Classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
"""
测试训练函数
"""
# load the data
listOPosts,listClasses=loadDataSet()
# build myVocabList, the list of all words
myVocabList = createVocabList(listOPosts)
# fill the trainMat list with word vectors
trainMat = []
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList,postinDoc))
# returns the probability that a document is abusive and the two class
# probability vectors
p0V,p1V,pAb=trainNB0(trainMat,listClasses)
p0V
array([-2.56494936, -2.56494936, -2.56494936, -3.25809654, -3.25809654,
-2.56494936, -2.56494936, -2.56494936, -3.25809654, -2.56494936,
-2.56494936, -2.56494936, -2.56494936, -3.25809654, -3.25809654,
-2.15948425, -3.25809654, -3.25809654, -2.56494936, -3.25809654,
-2.56494936, -2.56494936, -3.25809654, -2.56494936, -2.56494936,
-2.56494936, -3.25809654, -2.56494936, -3.25809654, -2.56494936,
-2.56494936, -1.87180218])
"""
测试朴素贝叶斯分类器
"""
testingNB()
['love', 'my', 'dalmation'] Classified as: 0
['stupid', 'garbage'] Classified as: 1
"""
文本解析
"""
import re
# split on any run of characters other than letters and digits
regEx = re.compile('\\W+')  # before the fix: '\\W*', which also produces empty matches
mySent = 'This book is the best book on Python or M.L. I have ever laid eyes upon.'
listOfTokens = regEx.split(mySent)
# drop the empty strings
[tok for tok in listOfTokens if len(tok) > 0]
# convert everything to lower case
[tok.lower() for tok in listOfTokens if len(tok) > 0]
# read a text file
emailText = open('F:\\study\\email\\ham\\6.txt').read()
listOfTokens=regEx.split(emailText)
"""
使用朴素贝叶斯进行交叉验证
文本解析及完整的垃圾邮件测试函数
"""
def textParse(bigString):
    """
    Split a string read from a text file into tokens.
    """
    import re
    # split on runs of non-word characters
    # before the fix: re.split(r'\w', bigString), which splits on word
    # characters instead and is a bug
    listOfTokens = re.split(r'\W+', bigString)
    # keep only tokens longer than 2 characters, converted to lower case
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
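A quick sanity check on a made-up string (not from the email corpus):

print textParse('Hello, M.L. World!! This is SPAM test no. 42')
# ['hello', 'world', 'this', 'spam', 'test'] -- short tokens like 'is', 'M', 'L', '42' are dropped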
def spamTest():
    docList = []; classList = []; fullText = []
    # import and parse the text files
    # class labels: spam = 1, ham = 0
    for i in range(1,26):
        wordList = textParse(open('F:\\study\\email\\spam\\%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('F:\\study\\email\\ham\\%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    # build the vocabulary without duplicates
    vocabList = createVocabList(docList)
    trainingSet = range(50)
    testSet = []
    # randomly build the test set
    for i in range(10):
        # random.uniform(x,y) returns a random float in the range [x, y)
        randIndex = int(random.uniform(0,len(trainingSet)))
        # add the chosen document number to the test set...
        testSet.append(trainingSet[randIndex])
        # ...and delete it from the training set; this is why no document can
        # be picked twice, even though len(trainingSet) shrinks on every pass
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        # build the set-of-words vector of docList[docIndex] over vocabList
        # and append it to trainMat
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        # append the matching class label to trainClasses
        trainClasses.append(classList[docIndex])
    # call trainNB0 to get the probability vectors
    # pSpam: the fraction of spam among the training emails
    p0V,p1V,pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        # build the test vector
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        # classify it, compare against the true label, and count the errors
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ',float(errorCount)/len(testSet)
"""
使用朴素贝叶斯进行交叉验证,测试垃圾邮件分类器
"""
spamTest()
the error rate is: 0.1
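Because the 10 test emails are drawn at random, the error rate varies from run to run, so a single run tells us little. A minimal sketch of averaging several runs, assuming spamTest were modified to return float(errorCount)/len(testSet) instead of printing it:

errorSum = 0.0
numRuns = 10
for k in range(numRuns):
    errorSum += spamTest()    # hypothetical return value, see the assumption above
print 'average error rate over', numRuns, 'runs:', errorSum/numRuns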
# Section 4.7: using Naive Bayes to reveal local attitudes from personal ads
# (currently working in an intranet environment, so this part is set aside for now)