Applicable data type: nominal values

Text classification with Python:

Building word vectors from text:

```python
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return postingList, classVec

# Use a set to build a list of unique words
def createVocabList(dataSet):
    vocabSet = set([])  # create empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
```

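A quick sanity check of these helpers. The vocabulary comes out of a `set`, so its ordering varies between runs; the six sample posts contain 32 unique words in total:

```python
listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
print(len(myVocabList))                            # 32 unique words
print(setOfWords2Vec(myVocabList, listOPosts[0]))  # 0/1 vector of length 32
```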

```python
import numpy as np

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    p0Num = np.ones(numWords); p1Num = np.ones(numWords)  # change to np.ones()
    p0Denom = 2.0; p1Denom = 2.0  # change to 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = np.log(p1Num / p1Denom)  # change to np.log()
    p0Vect = np.log(p0Num / p0Denom)  # change to np.log()
    return p0Vect, p1Vect, pAbusive
```
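The `np.ones()` and `2.0` initialisation noted in the comments is Laplace smoothing: without it, a word that never appears in one class would get a conditional probability of 0 and wipe out the entire product $p(w_1 \mid c_i)\,p(w_2 \mid c_i) \cdots p(w_n \mid c_i)$. With the smoothed counts each estimate becomes

$$p(w_k \mid c_i) = \frac{n_k + 1}{n + 2}$$

where $n_k$ is the number of occurrences of word $k$ in documents of class $c_i$ and $n$ is the total word count of that class.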



Modifying the classifier:

Underflow problem: take the natural logarithm of the product instead of computing it directly.
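To see why: each conditional probability is small, and multiplying dozens of them underflows to 0.0 in floating point, while the sum of their logs stays representable. Since $\ln$ is monotonic, comparing log-sums ranks the classes the same way as comparing the products. A minimal illustration (the probabilities are made up):

```python
import numpy as np

probs = np.full(1000, 0.01)    # 1000 word conditionals of 0.01 each
print(np.prod(probs))          # 0.0 -- the product underflows
print(np.sum(np.log(probs)))   # about -4605.2 -- still fine to compare
```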

```python
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)  # element-wise mult
    p0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
```

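Calling `testingNB()` should label the first entry 0 (not abusive) and the second 1 (abusive), since 'stupid' and 'garbage' appear only in the abusive posts:

```python
testingNB()
# ['love', 'my', 'dalmation'] classified as:  0
# ['stupid', 'garbage'] classified as:  1
```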

```python
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
```

(Set-of-words model: each word is either present or absent; bag-of-words model: each word's number of occurrences is recorded. The difference is illustrated in the sketch below.)
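A quick comparison of the two vectorizers on a document with a repeated word (tiny made-up vocabulary, just for illustration):

```python
vocab = ['dog', 'stupid', 'my']
doc = ['stupid', 'stupid', 'dog']
print(setOfWords2Vec(vocab, doc))    # [1, 1, 0] -- presence only
print(bagOfWords2VecMN(vocab, doc))  # [1, 2, 0] -- occurrence counts
```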

Example: filtering spam email with naive Bayes


Problem: why does the splitting of the text into word tokens fail?

An alternative approach works; see the references below and the sketch that follows them:
https://www.cnblogs.com/xiaokuangnvhai/p/11213308.html
https://www.cnblogs.com/xp1315458571/p/13720333.html
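The likely culprit is the book's original pattern, `re.split(r'\W*', bigString)`: `\W*` can match the empty string, which newer versions of Python's `re` module either warn about or treat as a split point between every character, so the "tokens" come back as single letters. `\W+` splits on runs of one or more non-word characters instead (the sample sentence is just for illustration):

```python
import re

bigString = 'This book is the best book on Python or M.L. I have ever laid eyes upon.'
print(re.split(r'\W+', bigString))
# ['This', 'book', 'is', 'the', 'best', 'book', 'on', 'Python', 'or',
#  'M', 'L', 'I', 'have', 'ever', 'laid', 'eyes', 'upon', '']
```

The empty string and short tokens like 'M' and 'L' are then filtered out by the `len(tok) > 2` test in `textParse` below.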

```python
def textParse(bigString):  # input is big string, output is word list
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i, encoding="ISO-8859-1").read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i, encoding="ISO-8859-1").read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)  # create vocabulary
    trainingSet = list(range(50)); testSet = []  # create test set
    for i in range(10):
        randIndex = int(np.random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])  # delete from the training set itself, not a copy
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount)/len(testSet))
    #return vocabList, fullText
```
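Because the 10 test emails are drawn at random, the error rate varies from run to run; averaging over repeated hold-out splits gives a more stable estimate. A minimal driver sketch, assuming `spamTest()` is modified to `return float(errorCount)/len(testSet)` rather than only printing it:

```python
rates = [spamTest() for _ in range(10)]  # hypothetical: needs spamTest to return its rate
print('average error rate:', sum(rates) / len(rates))
```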

Example: revealing local attitudes from personal ads