This article walks through Part 4 of Machine Learning in Action (Scala implementation): Naive Bayes. Hopefully it serves as a useful reference for developers working on the same problem.
Principle
For the theory behind the algorithm, see: http://blog.csdn.net/u011239443/article/details/53735609#t35
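As a quick recap (the standard formulation, summarized here rather than taken from the linked post): given a document's word vector w and a class ci, naive Bayes compares posteriors via Bayes' rule, and the "naive" conditional-independence assumption factorizes the likelihood:

```latex
% Bayes' rule; p(w) is identical across classes, so it can be dropped
% when we only compare classes against each other:
p(c_i \mid w) = \frac{p(w \mid c_i)\, p(c_i)}{p(w)} \;\propto\; p(w \mid c_i)\, p(c_i)

% The "naive" assumption: words are conditionally independent given the class:
p(w \mid c_i) = \prod_{j=1}^{n} p(w_j \mid c_i)
```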
Building the Word Vector
```python
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    # 1 marks an abusive post, 0 a normal one
    classVec = [0, 1, 0, 1, 0, 1]
    return postingList, classVec

def createVocabList(dataSet):
    vocabSet = set()  # empty vocabulary set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
```
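To make the representation concrete, here is a hand-worked illustration; the four-word vocabulary below is hypothetical (in practice `createVocabList` returns the words in no guaranteed order):

```python
vocab = ['my', 'dog', 'stupid', 'love']        # hypothetical vocabulary order
setOfWords2Vec(vocab, ['love', 'my', 'dog'])   # -> [1, 1, 0, 1]
```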
Training and Testing the Algorithm
Training the algorithm

```python
from numpy import ones, log

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # start counts at 1 and denominators at 2.0 so no log argument is ever zero
    p0Num = ones(numWords); p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num / p1Denom)   # element-wise log of p(wi|c1)
    p0Vect = log(p0Num / p0Denom)   # element-wise log of p(wi|c0)
    return p0Vect, p1Vect, pAbusive
```
- `pAbusive` should really be the probability of each class. But since this is a binary problem whose only classes are 0 and 1, it is enough to return the probability of class 1 to the downstream code; the probability of class 0 is then `1.0 - pAbusive`.
- `p0Num = ones(numWords); p1Num = ones(numWords)` and `p0Denom = 2.0; p1Denom = 2.0` initialize the counts so that neither the argument of the later `log` nor the denominator can ever be zero (see the worked equations after this list).
- `p1Num/p1Denom` gives, at position i, the conditional probability p(wi|c1). Since log(p(w|c1)) = log(p(w1|c1)) + log(p(w2|c1)) + ... + log(p(wn|c1)), we only need to take the log of each element of this vector and later sum them to obtain log(p(w|c1)).
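Written out explicitly (the hat notation is mine, not the book's), the smoothed per-word estimate computed by `trainNB0` and the log-likelihood it feeds into are:

```latex
% Initializing counts to 1 and denominators to 2.0 yields the smoothed
% estimate below, so the argument of the log can never be zero:
\hat{p}(w_i \mid c_1) = \frac{\mathrm{count}(w_i, c_1) + 1}{\sum_j \mathrm{count}(w_j, c_1) + 2}

% Log-likelihood of a whole document under the independence assumption:
\log p(w \mid c_1) = \sum_i \log \hat{p}(w_i \mid c_1)
```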
Testing the algorithm

```python
from numpy import array, log

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))
```
Since p(w) is the same for both classes, we only need to compare p(w|c0)p(c0) against p(w|c1)p(c1), i.e. compare log(p(w|c0)p(c0)) against log(p(w|c1)p(c1)). For example: log(p(w|c0)p(c0)) = log(p(w|c0)) + log(p(c0)) = sum(vec2Classify * p0Vec) + log(1.0 - pClass1).
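Spelling out how the code computes this (the indicator notation is mine): the entries of `vec2Classify` are exactly the 0/1 indicators below, so the sum over the vocabulary is just the element-wise product of `vec2Classify` with the log-probability vector `p0Vec`, summed:

```latex
\log\big(p(w \mid c_0)\, p(c_0)\big)
  = \sum_{i=1}^{n} \mathbf{1}[w_i \in w]\, \log p(w_i \mid c_0) + \log p(c_0)
```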
```scala
package NativeBayes

object NativeBayes {

  def loadDataSet() = {
    val postingList = Array(
      Array("my", "dog", "has", "flea", "problems", "help", "please"),
      Array("maybe", "not", "take", "him", "to", "dog", "park", "stupid"),
      Array("my", "dalmation", "is", "so", "cute", "I", "love", "him"),
      Array("stop", "posting", "stupid", "worthless", "garbage"),
      Array("mr", "licks", "ate", "my", "steak", "how", "to", "stop", "him"),
      Array("quit", "buying", "worthless", "dog", "food", "stupid"))
    // 1 marks an abusive post, 0 a normal one
    val classVec = Array(0, 1, 0, 1, 0, 1)
    (postingList, classVec)
  }

  // Convert a document into a 0/1 vector over the vocabulary
  def setOfWords2Vec(vocabList: Array[String], inputSet: Array[String]) = {
    val returnVec = new Array[Int](vocabList.length)
    for (word <- inputSet) {
      val idx = vocabList.indexOf(word)
      if (idx >= 0) returnVec(idx) = 1
      else printf("the word: %s is not in my Vocabulary!\n", word)
    }
    returnVec
  }

  def trainNB0(trainMatrix: Array[Array[Int]], trainCategory: Array[Int]) = {
    val numTrainDocs = trainMatrix.length
    val numWords = trainMatrix(0).length
    val pAbusive = trainCategory.sum / numTrainDocs.toDouble
    // start counts at 1 and denominators at 2.0 so no log argument is ever zero
    var p0Num = Array.fill(numWords)(1)
    var p1Num = Array.fill(numWords)(1)
    var p0Denom = 2.0
    var p1Denom = 2.0
    for (i <- 0 until numTrainDocs) {
      if (trainCategory(i) == 1) {
        p1Num = p1Num.zip(trainMatrix(i)).map { case (n, x) => n + x }
        p1Denom += trainMatrix(i).sum
      } else {
        p0Num = p0Num.zip(trainMatrix(i)).map { case (n, x) => n + x }
        p0Denom += trainMatrix(i).sum
      }
    }
    (p1Num.map(x => math.log(x / p1Denom)),
     p0Num.map(x => math.log(x / p0Denom)),
     pAbusive)
  }

  def classifyNB(vec2Classify: Array[Int], p0Vec: Array[Double],
                 p1Vec: Array[Double], pClass1: Double) = {
    val p1 = vec2Classify.zip(p1Vec).map { case (x, p) => x * p }.sum + math.log(pClass1)
    val p0 = vec2Classify.zip(p0Vec).map { case (x, p) => x * p }.sum + math.log(1.0 - pClass1)
    if (p1 > p0) 1 else 0
  }

  def main(args: Array[String]): Unit = {
    val (listOPosts, listClasses) = loadDataSet()
    val myVocabList = listOPosts.flatten.distinct
    val trainMat = listOPosts.map(postinDoc => setOfWords2Vec(myVocabList, postinDoc))
    val (p1V, p0V, pAb) = trainNB0(trainMat, listClasses)

    val testEntry = Array("love", "my", "dalmation")
    val thisDoc = setOfWords2Vec(myVocabList, testEntry)
    println(testEntry.mkString(",") + " classified as: " + classifyNB(thisDoc, p0V, p1V, pAb))

    val testEntry2 = Array("stupid", "garbage")
    val thisDoc2 = setOfWords2Vec(myVocabList, testEntry2)
    println(testEntry2.mkString(",") + " classified as: " + classifyNB(thisDoc2, p0V, p1V, pAb))
  }
}
```
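As a sanity check (reasoning from the toy data, not captured program output): "stupid" and "garbage" occur only in abusive posts, while "love", "my", and "dalmation" occur only in normal ones, so running `main` should print something like:

```
love,my,dalmation classified as: 0
stupid,garbage classified as: 1
```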
That concludes this look at Naive Bayes from Machine Learning in Action (Scala implementation); hopefully it is of some help.