# A General-Purpose Machine Learning Classification Template

This article provides a general-purpose machine learning classification template in which only two functions need to be rewritten: one for preprocessing and one for evaluation.
Two main-function variants are included: one for routine single-model training and saving, and one for comparing candidate models and producing a metrics table to pick the best one.

The main steps are:

- Rewrite the data preprocessing function (this article uses TF-IDF on text data as the example).
- Rewrite the evaluation function (pick whatever you need from sklearn.metrics; here accuracy, precision, recall, and F1 are used).
- Plug both into a main function (prefer reading structured data with pandas; you can first run the model-selection main to tune and pick the best model, then train that model on its own with the single-model main).

Planned updates for later versions:

- A prediction function (load the saved model and preprocessing config to predict one or more samples; see the sketch after the main function below).
- Visualization of the classification results.
- More preprocessing examples (images, tabular data, time series).
- More evaluation functions.

## Imports

```python
import json
import time
import joblib
import pickle
import jieba
import pandas as pd

# sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import RidgeClassifier, LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, ComplementNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from lightgbm import LGBMClassifier

import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.simplefilter("ignore")
```
## Data preprocessing function

```python
# Text data usually has to be turned into vectors by some technique before a
# classifier can use it; TF-IDF is used here as the example.
def TF_IDF_Deal(train_data, content_col, label_col):
    # Data cleaning: drop the first row (data-specific) and keep only the two columns
    train_data.drop(0, inplace=True, axis=0)
    train_data = train_data[[content_col, label_col]]
    # sample() draws rows at random; frac is the fraction to keep
    # train_data = train_data.sample(frac=1.0)
    # Encode the labels and save the name<->label mappings
    target_names = train_data[label_col].value_counts().index
    lbl = LabelEncoder().fit(target_names)
    train_data[label_col] = lbl.transform(train_data[label_col])
    name_label = {i: str(j) for i, j in zip(target_names, lbl.transform(target_names))}
    label_name = {str(j): i for i, j in zip(target_names, lbl.transform(target_names))}
    with open("label_name.json", "w") as f:
        json.dump(label_name, f)
    with open("name_label.json", "w") as f:
        json.dump(name_label, f)

    # Segment the titles only (word segmentation with jieba)
    def strcut(s):
        seg_list = jieba.cut(s)
        return ' '.join(list(seg_list))

    train_title = train_data[content_col].apply(strcut)
    # TF-IDF
    tfidf = TfidfVectorizer(ngram_range=(1, 1))
    train_title_ttidf = tfidf.fit_transform(train_title)
    # Save the fitted vectorizer (the savemodel/ directory must already exist)
    with open("savemodel/TF_IDF.pkl", "wb") as f:
        pickle.dump(tfidf, f)
    # Split into training and validation sets
    tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y = train_test_split(
        train_data[content_col], train_title_ttidf, train_data[label_col],
        stratify=train_data[label_col],
        # shuffle=True,
        test_size=0.2,
        random_state=666
    )
    return tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y
```
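TfidfVectorizer splits on whitespace, which is why the Chinese text is segmented with jieba and re-joined with spaces first. A minimal illustration of what the `strcut` step produces (the sample sentence is made up, and the exact segmentation depends on jieba's dictionary):

```python
import jieba

# Hypothetical sentence; jieba splits it into words that are re-joined with
# spaces so TfidfVectorizer can tokenize them.
sentence = "机器学习分类通用代码"
print(" ".join(jieba.cut(sentence)))
# e.g. "机器 学习 分类 通用 代码"
```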

## Evaluation function

```python
def predict_metrics_PRF(clf, clf_name, val_tfidf, val_y, with_return=False):
    val_pred = clf.predict(val_tfidf)
    accuracy = accuracy_score(y_true=val_y, y_pred=val_pred)
    precision = precision_score(y_true=val_y, y_pred=val_pred, average='macro')
    recall = recall_score(y_true=val_y, y_pred=val_pred, average='macro')
    f1score = f1_score(y_true=val_y, y_pred=val_pred, average='macro')
    if with_return:
        return accuracy, precision, recall, f1score
    else:
        print("{}:".format(clf_name))
        print("accuracy: {:.3f}\nprecision: {:.3f}".format(accuracy, precision))
        print("recall: {:.3f}\nf1score: {:.3f}".format(recall, f1score))
```
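The to-do list also mentions adding other evaluation functions. A minimal sketch of one alternative, assuming the same `clf`, `val_tfidf`, and `val_y` as above, using `classification_report` and `confusion_matrix` from sklearn.metrics to show per-class results instead of four macro-averaged scalars:

```python
from sklearn.metrics import classification_report, confusion_matrix

def predict_metrics_report(clf, clf_name, val_tfidf, val_y):
    """Print a per-class report and the confusion matrix."""
    val_pred = clf.predict(val_tfidf)
    print("{}:".format(clf_name))
    print(classification_report(y_true=val_y, y_pred=val_pred, digits=3))
    print(confusion_matrix(y_true=val_y, y_pred=val_pred))
```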

## Main function

Rewrite the data preprocessing function for your own data type and processing pipeline, and rewrite the evaluation function for the metrics you need.
Once both are in place, the main function below, about seven lines of code, trains and saves a machine learning classifier.

```python
if __name__ == '__main__':
    # Load the data
    train_data = pd.read_excel('../data/3_训练数据集20210414_A1.xlsx')
    # Data preprocessing
    tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y = TF_IDF_Deal(train_data, 'NEWS_TITLE', 'LABEL')
    # Build, train and save the model
    clf_name = "RidgeClassifier"
    clf = RidgeClassifier()
    clf.fit(tr_tfidf, tr_y)  # train
    joblib.dump(clf, "savemodel/{}.model".format(clf_name))  # save the model
    # Evaluate
    predict_metrics_PRF(clf, clf_name, val_tfidf, val_y)
```
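The prediction function planned in the to-do list would load these saved artifacts back in. A minimal sketch of what it could look like; the function name `predict_texts` and the default model path are illustrative, it assumes `savemodel/TF_IDF.pkl`, the saved `.model` file, and `label_name.json` were written by the code above, and note that the loaded vectorizer must only `transform` new text, never be refit:

```python
import json
import pickle
import joblib
import jieba

def predict_texts(texts, model_path="savemodel/RidgeClassifier.model"):
    """Predict labels for a list of raw text strings using the saved artifacts."""
    # Load the fitted TF-IDF vectorizer and the trained classifier.
    with open("savemodel/TF_IDF.pkl", "rb") as f:
        tfidf = pickle.load(f)
    clf = joblib.load(model_path)
    # Segment the new texts the same way as during training, then transform (no refit).
    segmented = [" ".join(jieba.cut(t)) for t in texts]
    features = tfidf.transform(segmented)
    pred = clf.predict(features)
    # Map encoded labels back to their original names.
    with open("label_name.json", "r") as f:
        label_name = json.load(f)
    return [label_name[str(i)] for i in pred]
```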

## Model selection code

Both competitions and papers call for model comparisons, so a model-selection variant of the main function is provided: it iterates over a set of common sklearn classifiers and reports which one performs best.
Notes:

- All models are instantiated with their default parameters in the code below; tune them yourself as needed.
- Models that require dense input, such as LDA (linear discriminant analysis), are not included; see the sketch after the code block for one way to add them.
```python
if __name__ == '__main__':
    # company_name = pd.read_excel('../data/2_企业公司名.xlsx', names=['name'])
    train_data = pd.read_excel('../data/3_训练数据集20210414_A1.xlsx')
    # Data preprocessing
    tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y = TF_IDF_Deal(train_data, 'NEWS_TITLE', 'LABEL')
    # Model comparison
    model_names = ['RidgeClassifier', 'LogisticRegression', 'SGDClassifier',  # linear models
                   'MultinomialNB', 'BernoulliNB', 'ComplementNB',  # naive Bayes (GaussianNB not included)
                   'KNeighborsClassifier',  # nearest neighbours
                   'RandomForestClassifier', 'AdaBoostClassifier', 'GradientBoostingClassifier',  # ensembles
                   'SVC', 'DecisionTreeClassifier',  # SVM, decision tree
                   'LGBMClassifier',  # LightGBM, a Kaggle favourite
                   ]
    models = [RidgeClassifier(),
              LogisticRegression(),
              SGDClassifier(),
              MultinomialNB(),
              BernoulliNB(),
              ComplementNB(),
              KNeighborsClassifier(),
              RandomForestClassifier(),
              AdaBoostClassifier(),
              GradientBoostingClassifier(),
              SVC(),
              DecisionTreeClassifier(),
              LGBMClassifier()
              ]
    # Find the best model
    all_time = time.time()  # record the start time
    metrics_df = pd.DataFrame([], columns=["model", "accuracy", "precision", "recall", "f1_score"])  # results table
    for idx, (model, model_name) in enumerate(zip(models, model_names)):
        start = time.time()
        clf = model.fit(tr_tfidf, tr_y)
        # If the evaluation function was rewritten, adjust the return values here
        # and the columns of the results table accordingly
        accuracy, precision, recall, f1score = predict_metrics_PRF(clf, model_name, val_tfidf, val_y, with_return=True)
        metrics_df.loc[len(metrics_df)] = [model_name, accuracy, precision, recall, f1score]
        print("{}/{} {} took {:.3f}s".format(idx + 1, len(model_names), model_name, time.time() - start))  # progress
    print("Total time: {:.3f}s".format(time.time() - all_time))
    print(metrics_df)
    metrics_df.to_csv("best_model_metrics.csv", index=False)
```
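As noted above, models that need dense input (e.g. GaussianNB, LinearDiscriminantAnalysis) were left out of the comparison. A minimal sketch of how they could be evaluated alongside the others by densifying the TF-IDF matrices, assuming the variables from the code above and enough memory for `toarray()` on your vocabulary size:

```python
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

# Sparse TF-IDF matrices must be converted to dense arrays for these models.
# Beware: toarray() can use a lot of memory for large vocabularies.
tr_dense = tr_tfidf.toarray()
val_dense = val_tfidf.toarray()

for model_name, model in [("GaussianNB", GaussianNB()),
                          ("LinearDiscriminantAnalysis", LinearDiscriminantAnalysis())]:
    clf = model.fit(tr_dense, tr_y)
    predict_metrics_PRF(clf, model_name, val_dense, val_y)
```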

## Results

*(Screenshot of the resulting model comparison table.)*


## Full code

```python
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    : TF_IDF_Deal.py
@Contact : htkstudy@163.com
@Modify Time      @Author      @Version   @Description
------------      -------      --------   -----------
2021/5/7 18:29    Armor(htk)   1.0        None
'''
import json
import time
import joblib
import pickle
import jieba
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import RidgeClassifier, LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, ComplementNB, GaussianNB
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from lightgbm import LGBMClassifier
import warnings

warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.simplefilter("ignore")


def TF_IDF_Deal(train_data, content_col, label_col):
    # Data cleaning: drop the first row (data-specific) and keep only the two columns
    train_data.drop(0, inplace=True, axis=0)
    train_data = train_data[[content_col, label_col]]
    # sample() draws rows at random; frac is the fraction to keep
    # train_data = train_data.sample(frac=1.0)
    # Encode the labels and save the name<->label mappings
    target_names = train_data[label_col].value_counts().index
    lbl = LabelEncoder().fit(target_names)
    train_data[label_col] = lbl.transform(train_data[label_col])
    name_label = {i: str(j) for i, j in zip(target_names, lbl.transform(target_names))}
    label_name = {str(j): i for i, j in zip(target_names, lbl.transform(target_names))}
    with open("label_name.json", "w") as f:
        json.dump(label_name, f)
    with open("name_label.json", "w") as f:
        json.dump(name_label, f)

    # Segment the titles only (word segmentation with jieba)
    def strcut(s):
        seg_list = jieba.cut(s)
        return ' '.join(list(seg_list))

    train_title = train_data[content_col].apply(strcut)
    # TF-IDF
    tfidf = TfidfVectorizer(ngram_range=(1, 1))
    train_title_ttidf = tfidf.fit_transform(train_title)
    # Save the fitted vectorizer (the savemodel/ directory must already exist)
    with open("savemodel/TF_IDF.pkl", "wb") as f:
        pickle.dump(tfidf, f)
    # Split into training and validation sets
    tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y = train_test_split(
        train_data[content_col], train_title_ttidf, train_data[label_col],
        stratify=train_data[label_col],
        # shuffle=True,
        test_size=0.2,
        random_state=666
    )
    return tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y


def predict_metrics_PRF(clf, clf_name, val_tfidf, val_y, with_return=False):
    val_pred = clf.predict(val_tfidf)
    val_true = val_y.values.tolist()
    accuracy = accuracy_score(y_true=val_true, y_pred=val_pred)
    precision = precision_score(y_true=val_true, y_pred=val_pred, average='macro')
    recall = recall_score(y_true=val_true, y_pred=val_pred, average='macro')
    f1score = f1_score(y_true=val_true, y_pred=val_pred, average='macro')
    if with_return:
        return accuracy, precision, recall, f1score
    else:
        print("{}:".format(clf_name))
        print("accuracy: {:.3f}\nprecision: {:.3f}".format(accuracy, precision))
        print("recall: {:.3f}\nf1score: {:.3f}".format(recall, f1score))


def model_example():
    # company_name = pd.read_excel('../data/2_企业公司名.xlsx', names=['name'])
    train_data = pd.read_excel('../data/3_训练数据集20210414_A1.xlsx')
    # Data preprocessing
    tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y = TF_IDF_Deal(train_data, 'NEWS_TITLE', 'LABEL')
    # Build, train and save the model
    clf_name = "RidgeClassifier"
    clf = RidgeClassifier()
    clf.fit(tr_tfidf, tr_y)  # train
    joblib.dump(clf, "savemodel/{}.model".format(clf_name))  # save the model
    # Evaluate
    predict_metrics_PRF(clf, clf_name, val_tfidf, val_y)


# Prediction sketch (to be completed in a later version):
# if __name__ == '__main__':
#     # Preprocessing: load the saved TF-IDF vectorizer
#     with open("savemodel/TF_IDF.pkl", 'rb') as f:
#         tfidf = pickle.load(f)
#     val_x = tfidf.transform(val_x)
#     print(val_x)
#     # Load the model
#     clf = joblib.load(savemodel_path)
#     # Predict
#     val_pred = clf.predict(val_x)
#     with open("label_name.json", 'r') as f:
#         key_values = json.load(f)
#     label_pred = [key_values[str(i)] for i in val_pred]
#     return val_pred, label_pred


if __name__ == '__main__':
    # company_name = pd.read_excel('../data/2_企业公司名.xlsx', names=['name'])
    train_data = pd.read_excel('../data/3_训练数据集20210414_A1.xlsx')
    # Data preprocessing
    tr_x, val_x, tr_tfidf, val_tfidf, tr_y, val_y = TF_IDF_Deal(train_data, 'NEWS_TITLE', 'LABEL')
    # Model comparison
    model_names = ['RidgeClassifier', 'LogisticRegression', 'SGDClassifier',  # linear models
                   'MultinomialNB', 'BernoulliNB', 'ComplementNB',  # naive Bayes (GaussianNB not included)
                   'KNeighborsClassifier',  # nearest neighbours
                   'RandomForestClassifier', 'AdaBoostClassifier', 'GradientBoostingClassifier',  # ensembles
                   'SVC', 'DecisionTreeClassifier',  # SVM, decision tree
                   'LGBMClassifier',  # LightGBM, a Kaggle favourite
                   ]
    models = [RidgeClassifier(),
              LogisticRegression(),
              SGDClassifier(),
              MultinomialNB(),
              BernoulliNB(),
              ComplementNB(),
              KNeighborsClassifier(),
              RandomForestClassifier(),
              AdaBoostClassifier(),
              GradientBoostingClassifier(),
              SVC(),
              DecisionTreeClassifier(),
              LGBMClassifier()
              ]
    # Find the best model
    all_time = time.time()  # record the start time
    metrics_df = pd.DataFrame([], columns=["model", "accuracy", "precision", "recall", "f1_score"])  # results table
    for idx, (model, model_name) in enumerate(zip(models, model_names)):
        start = time.time()
        clf = model.fit(tr_tfidf, tr_y)
        # If the evaluation function was rewritten, adjust the return values here
        # and the columns of the results table accordingly
        accuracy, precision, recall, f1score = predict_metrics_PRF(clf, model_name, val_tfidf, val_y, with_return=True)
        metrics_df.loc[len(metrics_df)] = [model_name, accuracy, precision, recall, f1score]
        print("{}/{} {} took {:.3f}s".format(idx + 1, len(model_names), model_name, time.time() - start))  # progress
    print("Total time: {:.3f}s".format(time.time() - all_time))
    print(metrics_df)
    metrics_df.to_csv("best_model_metrics.csv", index=False)
```
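Once `best_model_metrics.csv` has been written, picking the strongest model is a one-liner in pandas. A small sketch, assuming the CSV produced by the comparison above:

```python
import pandas as pd

metrics_df = pd.read_csv("best_model_metrics.csv")
# Sort by macro F1 (descending) to see the strongest models first.
print(metrics_df.sort_values("f1_score", ascending=False).head())
```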