Definition: logistic regression links a sample's features to the probability that an event occurs for that sample; that probability is a single number.
Use: typically employed as a classification algorithm; in its basic form it can only solve binary classification problems.

# 1. Principle

## 1.1 The sigmoid function

The sigmoid function $\sigma(t) = \cfrac{1}{1+e^{-t}}$ maps any real number into the interval $(0, 1)$, with $\sigma(0) = 0.5$:

```python
import numpy as np
import matplotlib.pyplot as plt

def sigmoid(t):
    return 1. / (1. + np.exp(-t))

x = np.linspace(-10, 10, 500)

plt.plot(x, sigmoid(x))
plt.show()
```

![sigmoid curve](https://cdn.nlark.com/yuque/0/2021/png/12405790/1639894859293-b3e51595-9071-49fc-a27b-f268b5014d95.png)
## 1.2 Combining sigmoid with linear regression
- Feed the linear model's output into the sigmoid: $t = \theta^{T} \cdot X_b$, so $\hat p = \sigma(t) = \sigma(\theta^{T} \cdot X_b) = \cfrac{1}{1+e^{-\theta^{T} \cdot X_b}}$
- Then map the probability $\hat p$ to a class label (see the sketch below): $\hat y= \begin{cases} 1, & \hat p \geq 0.5 \\ 0, & \hat p < 0.5 \end{cases}$
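A minimal numpy sketch of this decision pipeline, assuming a parameter vector `theta` whose first entry is the intercept and a design matrix `X_b` with a leading column of ones (the values below are made up for illustration):

```python
import numpy as np

def sigmoid(t):
    return 1. / (1. + np.exp(-t))

# illustrative parameters and data: theta[0] is the intercept,
# X_b has a leading column of ones
theta = np.array([0.5, -1.2, 0.8])
X_b = np.array([[1., 2.0, 3.0],
                [1., 0.5, -1.0]])

p_hat = sigmoid(X_b.dot(theta))      # probabilities in (0, 1)
y_hat = (p_hat >= 0.5).astype(int)   # threshold at 0.5 -> labels 0/1
```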
## 1.3 Loss function
The loss should punish confident wrong predictions:

$$cost= \begin{cases} \text{if } y=1\text{, the smaller } \hat p \text{ is, the larger the cost} \\ \text{if } y=0\text{, the larger } \hat p \text{ is, the larger the cost} \end{cases}$$

How can such a loss be written as a formula?
### Loss for a single sample
$$cost= \begin{cases} -\log(\hat p), & y = 1 \\ -\log(1- \hat p), & y = 0 \end{cases}$$

A piecewise function is awkward to work with; can the two cases be merged into a single expression?

$$cost = -y\log(\hat p) - (1-y)\log(1- \hat p)$$

![cost curves for y=1 and y=0](https://cdn.nlark.com/yuque/0/2021/png/12405790/1639901122308-68dbe14e-3854-4d29-9c5d-7365199101f4.png)
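When $y=1$ the second term vanishes and only $-\log(\hat p)$ remains; when $y=0$ the first term vanishes and only $-\log(1-\hat p)$ remains, so the single expression reproduces both cases. A quick numeric check (values made up for illustration):

```python
import numpy as np

def cost(y, p_hat):
    return -y * np.log(p_hat) - (1 - y) * np.log(1 - p_hat)

p_hat = 0.8
print(cost(1, p_hat))  # -log(0.8) ~ 0.22: confident and correct, small cost
print(cost(0, p_hat))  # -log(0.2) ~ 1.61: confident but wrong, large cost
```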
### Average loss over the training set
$$J(\theta) = - \cfrac{1}{m} \sum_{i=1}^m \left[ y^{(i)}\log(\hat p^{(i)}) + (1-y^{(i)})\log(1- \hat p^{(i)}) \right]$$

![expanded form of the loss with sigma(theta^T X_b) substituted for p-hat](https://cdn.nlark.com/yuque/0/2021/png/12405790/1639896927885-397d5ffe-f03b-4f41-8d92-c738f94d1eac.png)

This loss function has no closed-form solution; it has to be minimized with gradient descent.
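For reference, a vectorized version of $J(\theta)$ given a label vector `y` and a probability vector `p_hat` (illustrative names, mirroring the formula above; the model class below contains the same computation):

```python
import numpy as np

def J(y, p_hat):
    # mean cross-entropy loss over the m samples
    return -np.mean(y * np.log(p_hat) + (1 - y) * np.log(1 - p_hat))
```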
## 1.4 Gradient of the loss function
$$\nabla J(\theta) = \cfrac{1}{m} \cdot X_b^{T} \cdot \left( \sigma(X_b\theta) - y \right)$$
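One way to convince yourself of this gradient formula is to compare it with a finite-difference approximation of $J(\theta)$ on random data. A self-contained sketch (all names and data are illustrative):

```python
import numpy as np

def sigmoid(t):
    return 1. / (1. + np.exp(-t))

def J(theta, X_b, y):
    p_hat = sigmoid(X_b.dot(theta))
    return -np.mean(y * np.log(p_hat) + (1 - y) * np.log(1 - p_hat))

def dJ(theta, X_b, y):
    # the analytic gradient: (1/m) * X_b^T (sigma(X_b theta) - y)
    return X_b.T.dot(sigmoid(X_b.dot(theta)) - y) / len(y)

def dJ_numeric(theta, X_b, y, eps=1e-6):
    # central finite differences, one coordinate at a time
    grad = np.empty_like(theta)
    for i in range(len(theta)):
        t_plus, t_minus = theta.copy(), theta.copy()
        t_plus[i] += eps
        t_minus[i] -= eps
        grad[i] = (J(t_plus, X_b, y) - J(t_minus, X_b, y)) / (2 * eps)
    return grad

rng = np.random.default_rng(0)
X_b = np.hstack([np.ones((20, 1)), rng.normal(size=(20, 2))])
y = rng.integers(0, 2, size=20).astype(float)
theta = rng.normal(size=3)
print(np.allclose(dJ(theta, X_b, y), dJ_numeric(theta, X_b, y)))  # True
```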
# 2. Implementation
## 2.1 A custom model class
```python
import numpy as np
from .metrics import accuracy_score


class LogisticRegression:

    def __init__(self):
        """Initialize the Logistic Regression model"""
        self.coef_ = None
        self.intercept_ = None
        self._theta = None

    def _sigmoid(self, t):
        return 1. / (1. + np.exp(-t))

    def fit(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """Train the Logistic Regression model on X_train, y_train using gradient descent"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        def J(theta, X_b, y):
            y_hat = self._sigmoid(X_b.dot(theta))
            try:
                return - np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
            except Exception:
                return float('inf')

        def dJ(theta, X_b, y):
            return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(y)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # stop when the loss no longer changes meaningfully
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                cur_iter += 1
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict_proba(self, X_predict):
        """Given a dataset X_predict, return the vector of predicted probabilities"""
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return self._sigmoid(X_b.dot(self._theta))

    def predict(self, X_predict):
        """Given a dataset X_predict, return the vector of predicted labels"""
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"
        proba = self.predict_proba(X_predict)
        return np.array(proba >= 0.5, dtype='int')

    def score(self, X_test, y_test):
        """Return the accuracy of the current model on the test set X_test, y_test"""
        y_predict = self.predict(X_test)
        return accuracy_score(y_test, y_predict)

    def __repr__(self):
        return "LogisticRegression()"
```

## 2.2 Building a model

```python
# 1. Prepare the data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data
y = iris.target
# keep only the first two classes (binary problem) and the first two features
X = X[y < 2, :2]
y = y[y < 2]

# 2. Visualize
plt.scatter(X[y == 0, 0], X[y == 0, 1], color="red")
plt.scatter(X[y == 1, 0], X[y == 1, 1], color="blue")
plt.show()
```


```python
# 3. Split the data
from playML.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, seed=666)

# 4. Build the model
from playML.LogisticRegression import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
log_reg.score(X_test, y_test)  # model accuracy: 1.0

# 5. Predict
```

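For step 5, a minimal sketch of inspecting the fitted model, using only the methods defined in the class above (outputs depend on the run, so none are shown):

```python
# learned parameters of the decision boundary
print(log_reg.coef_, log_reg.intercept_)

# predicted probabilities of class 1, then hard 0/1 labels
print(log_reg.predict_proba(X_test))
print(log_reg.predict(X_test))
```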