1、 下载wiki百科数据

从维基百科资料库(Wikipedia dumps)下载以 pages-articles.xml.bz2 结尾的文件
image.png

2、 解析wiki百科文本数据

  1. python3 wiki_to_txt.py zhwiki-20220201-pages-articles.xml.bz2
  1. import logging
  2. import sys
  3. from gensim.corpora import WikiCorpus
  4. def main():
  5. if len(sys.argv) != 2:
  6. print("Usage: python3 " + sys.argv[0] + " wiki_data_path")
  7. exit()
  8. logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
  9. wiki_corpus = WikiCorpus(sys.argv[1], dictionary={})
  10. texts_num = 0
  11. with open("wiki_texts.txt", 'w', encoding='utf-8') as output:
  12. for text in wiki_corpus.get_texts():
  13. output.write(' '.join(text) + '\n')
  14. texts_num += 1
  15. if texts_num % 10000 == 0:
  16. logging.info("已處理 %d 篇文章" % texts_num)
  17. if __name__ == "__main__":
  18. main()
  1. 2022-02-24 10:30:07,609 : INFO : 已處理 10000 篇文章
  2. ......
  3. 2022-02-24 10:44:44,092 : INFO : 已處理 410000 篇文章
  4. 2022-02-24 10:45:09,587 : INFO : finished iterating over Wikipedia corpus of 417371 documents with 96721989 positions (total 3964095 articles, 113681913 positions before pruning articles shorter than 50 words)

3、 繁体文本转简体

使用opencc 将文本数据繁体转简体

  1. opencc -i wiki_texts.txt -o wiki_zh_tw.txt -c t2s.json

4、 分词处理(包含去除停用词)

使用jieba分词对简体中文文本数据做分词,分词后写入txt文件(用于gensim模型训练)

  1. python3 segment.py
  1. import jieba
  2. import logging
  3. def main():
  4. logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
  5. # jieba custom setting.
  6. jieba.set_dictionary('jieba_dict/dict.txt.big')
  7. # load stopwords set
  8. stopword_set = set()
  9. with open('jieba_dict/stopwords.txt','r', encoding='utf-8') as stopwords:
  10. for stopword in stopwords:
  11. stopword_set.add(stopword.strip('\n'))
  12. output = open('wiki_seg.txt', 'w', encoding='utf-8')
  13. with open('wiki_zh_tw.txt', 'r', encoding='utf-8') as content :
  14. for texts_num, line in enumerate(content):
  15. line = line.strip('\n')
  16. words = jieba.cut(line, cut_all=False)
  17. for word in words:
  18. if word not in stopword_set:
  19. output.write(word + ' ')
  20. output.write('\n')
  21. if (texts_num + 1) % 10000 == 0:
  22. logging.info("已完成前 %d 行的斷詞" % (texts_num + 1))
  23. output.close()
  24. if __name__ == '__main__':
  25. main()

5、 模型训练和应用

  1. python3 train.py
  2. python3 demo.py
  1. # train.py
  2. import logging
  3. from gensim.models import word2vec
  4. def main():
  5. logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
  6. sentences = word2vec.LineSentence("wiki_seg.txt")
  7. # model = word2vec.Word2Vec(sentences, vector_size=250)
  8. model = word2vec.Word2Vec(sentences, sg=1, window=10, min_count=5, workers=6, vector_size=250)
  9. # 保存模型,供日後使用
  10. model.save("word2vec.model")
  11. # 模型讀取方式
  12. # model = word2vec.Word2Vec.load("your_model_name")
  13. if __name__ == "__main__":
  14. main()
  1. # demo.py
  2. from gensim.models import word2vec
  3. from gensim import models
  4. import logging
  5. def main():
  6. logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
  7. model = models.Word2Vec.load('word2vec.model')
  8. print("提供 3 種測試模式\n")
  9. print("輸入一個詞,則去尋找前十個該詞的相似詞")
  10. print("輸入兩個詞,則去計算兩個詞的餘弦相似度")
  11. print("輸入三個詞,進行類比推理")
  12. while True:
  13. try:
  14. query = input()
  15. q_list = query.split()
  16. if len(q_list) == 1:
  17. print("相似詞前 10 排序")
  18. res = model.wv.most_similar(q_list[0], topn=10)
  19. for item in res:
  20. print(item[0] + "," + str(item[1]))
  21. elif len(q_list) == 2:
  22. print("計算 Cosine 相似度")
  23. res = model.wv.similarity(q_list[0], q_list[1])
  24. print(res)
  25. else:
  26. print("%s之於%s,如%s之於" % (q_list[0], q_list[2], q_list[1]))
  27. res = model.wv.most_similar([q_list[0], q_list[1]], [q_list[2]], topn=100)
  28. for item in res:
  29. print(item[0] + "," + str(item[1]))
  30. print("----------------------------")
  31. except Exception as e:
  32. print(repr(e))
  33. if __name__ == "__main__":
  34. main()

6、效果图

image.png
数据及代码:https://github.com/SeafyLiang/machine_learning_study/tree/master/nlp_study/gensim_word2vec