from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import os
import urllib.parse
import matplotlib.pyplot as plt

def loadFile(name):
    directory = str(os.getcwd())
    filepath = os.path.join(directory, name)  # if name is already an absolute path, os.path.join returns it unchanged
    with open(filepath, 'r', encoding='utf8') as f:
        data = f.readlines()
    data = list(set(data))  # drop duplicate lines
    result = []
    for d in data:
        d = str(urllib.parse.unquote(d))  # decode URL-encoded data back to a plain string
        result.append(d)
    return result

badQueries = loadFile("d:/pythonLab/badqueries.txt")
validQueries = loadFile("d:/pythonLab/goodqueries.txt")

badQueries = list(set(badQueries))      # deduplicate again, since URL decoding can produce new duplicates
validQueries = list(set(validQueries))
allQueries = badQueries + validQueries

yBad = [1 for i in range(0, len(badQueries))]    # labels: 1 for malicious, 0 for clean
yGood = [0 for i in range(0, len(validQueries))]
y = yBad + yGood
queries = allQueries

## min_df ignores terms whose document frequency is below the given threshold (0.0 keeps everything).
## analyzer selects whether features are words or character n-grams; "char" extracts character
## n-grams directly from the raw input strings. sublinear_tf applies sublinear TF scaling,
## i.e. replaces tf with 1 + log(tf). ngram_range gives the lower and upper bounds of n for the
## n-grams to extract: all values of n with min_n <= n <= max_n are used.
vectorizer = TfidfVectorizer(min_df=0.0, analyzer="char", sublinear_tf=True, ngram_range=(1, 3))
X = vectorizer.fit_transform(queries)  # convert the queries to TF-IDF vectors

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)  # split the data

badCount = len(badQueries)
validCount = len(validQueries)

## class_weight: dict or 'balanced', default None.
## Weights associated with classes, in the form {class_label: weight}. If not given, all classes
## are assumed to have weight one. The 'balanced' mode uses the values of y to automatically set
## weights inversely proportional to the class frequencies in the input data, as
## n_samples / (n_classes * np.bincount(y)).
## Note that if sample_weight is specified (passed through the fit method), these weights are
## multiplied with sample_weight.
lgs = LogisticRegression(class_weight={1: 2 * validCount / badCount, 0: 1.0})  # or class_weight='balanced'
lgs.fit(X_train, y_train)  # train the model

############### Evaluation ###############
predicted = lgs.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, lgs.predict_proba(X_test)[:, 1])
auc = metrics.auc(fpr, tpr)

print("Bad samples: %d" % badCount)
print("Good samples: %d" % validCount)
print("Baseline, constant negative: %.6f" % (validCount / (validCount + badCount)))  # accuracy of always predicting "clean"
print("------------")
print("Accuracy: %f" % lgs.score(X_test, y_test))
print("Precision: %f" % metrics.precision_score(y_test, predicted))
print("Recall: %f" % metrics.recall_score(y_test, predicted))
print("F1-Score: %f" % metrics.f1_score(y_test, predicted))
print("AUC: %f" % auc)
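
# The script imports matplotlib.pyplot but never uses it. A minimal sketch of plotting
# the ROC curve from the fpr/tpr/auc values computed above (assumes the evaluation
# section has already run):
plt.figure()
plt.plot(fpr, tpr, label="LogisticRegression (AUC = %.4f)" % auc)
plt.plot([0, 1], [0, 1], linestyle="--", label="chance")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC curve for malicious query detection")
plt.legend(loc="lower right")
plt.show()

# Hedged usage sketch: classifying new, unseen queries with the fitted vectorizer and
# model. The query strings below are made-up examples, not from the training files.
# Note transform (not fit_transform), so the learned vocabulary is reused.
newQueries = ["/index.php?page=home", "/index.php?id=1' OR '1'='1"]
newX = vectorizer.transform(newQueries)
print(lgs.predict(newX))               # predicted labels: 1 = malicious, 0 = clean
print(lgs.predict_proba(newX)[:, 1])   # estimated probability of being malicious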