1.opencv 头文件描述

image.png

2.点积、叉积

若向量a=(a1,b1,c1),向量b=(a2,b2,c2)
向量a·向量b=a1a2 + b1b2 + c1c2
向量a×向量b=(b1c2-b2c1, c1a2-a1c2, a1b2-a2b1)
https://zhidao.baidu.com/question/11925732.html

3.cv::Rect 矩形相关

image.png

3.1 rectangle画矩形

  1. cv::Mat I,B,temp;
  2. I = cv::Mat::zeros(300, 400, CV_8UC3);
  3. B = I.clone();
  4. cv::Rect r1(60,60,200,100);
  5. cv::Rect r2(cv::Point(140, 120), cv::Size(200, 100));
  6. cv::rectangle(I, r1, cv::Scalar(0,255,0), 2);
  7. cv::rectangle(I, r2, cv::Scalar(0,0,255), 2);
  8. cv::imshow("rectangle",I);
  9. cv::Rect rs(r1);
  10. rs += cv::Size(-30, 60);
  11. cv::rectangle(B, r1, cv::Scalar(0,255,0), 2);
  12. cv::rectangle(B, rs, cv::Scalar(0,0,255), 2);
  13. cv::imshow("rectangle2",B);

image.pngimage.png

3.2 cv::RotatedRect 可以旋转矩形

包含一个center(中点)的cv::Point2f变量,一个名为size(矩形大小)的cv::Size2f变量,以及一个名为angle的浮点数变量,表示矩形围绕中心的旋转角度

  1. cv::Mat I,B,temp;
  2. I = cv::Mat(300, 400, CV_8UC3, cv::Scalar(255, 255, 255));
  3. cv::Point2f center(200, 150);
  4. cv::Size2f size(180, 120);
  5. float angle = 30.0;
  6. cv::RotatedRect rRect(center, size, angle);
  7. cv::Point2f vertices[4];
  8. rRect.points(vertices);
  9. for (int i=0;i<4; i++)
  10. {
  11. cv::line(I, vertices[i], vertices[(i+1)%4], cv::Scalar(255, 0, 0), 2);
  12. }
  13. cv::circle(I, center, 2, cv::Scalar(0,0,0), 2);
  14. cv::Rect boundingRect = rRect.boundingRect();//外接矩形
  15. cv::rectangle(I, boundingRect, cv::Scalar(0,0,255),1);
  16. cv::imshow("123",I);

image.png

4. RGB颜色空间

image.png

5.机器学习的分类

按照学习形式可以分为监督学习、无监督学习、强化学习等。在机器学习任务中用于训练、验证或测试的数据都称为样本,其中,用于训练的样本集称为训练集,用于验证模型好坏的样本集称为验证集,用于测试的样本集简称为测试集
监督学习使用的是标注过的带有标签的数据进行训练,通常用于回归、分类等。训练集中的每一个样本都由一个输入对象和标签组成。
无监督学习使用无标签的数据进行训练,通常用于聚类、异常检测等任务,机器从没有标签的数据中寻找隐藏的模式,发现数据之间内在的特征和相互关系,从而将其划分为不同的类别。
强化学习与监督学习类似,模型在学习过程中以获得对没有学习过的问题做出正确解答的泛化能力为目标。在强化学习过程中,算法对预测结果进行自我评估,从而不断改进。

6. K-means聚类

K-means是无监督学习(数据无标签)
K-means实现原理:https://zhuanlan.zhihu.com/p/104557021
image.png
以上图为例,分步骤解释:
1. 一组已知数据,我们希望能把他分成K簇;
2. 随机选择K(这里K=2)个聚类的初始中心(以红色、蓝色做区分);
3. 簇分配:轮询所有样本数据,计算其到K个中心点的距离,将样本归到距离最小的中心点所在的聚类,并标记为该聚类中心点的颜色;
4. 移动聚类中心:找到同一簇(红色或蓝色)所有点的平均位置,把簇中心点移动到这个位置上
5. 重复步骤3、4;
6. 直到中心点不再变动(或者变动范围小于一个很小的阈值),就认为K均值聚类成功了。
函数:

  1. CV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels,
  2. TermCriteria criteria, int attempts,
  3. int flags, OutputArray centers = noArray() );

参数1:输入,points为输入样本矩阵,每行为一个样本。
参数2:输入,clusterCount为类别数。
参数3:输出,labels是一个一维矩阵,其大小和points一样,存储每个输入样本执行k-means后的类标签,值为0到clusterCount-1。
参数4:输入,TermCriteria()为迭代终止条件,含义如下:
◎ TermCriteria::EPS:当迭代达到期望精度时终止,参数为double epsilon=1.0。
◎ TermCriteria::COUNT:当迭代达到最大迭代次数时终止,参数为int max_Count=10。
参数5:输入,attempts为算法使用不同初始类中心坐标尝试执行的次数,attempts=3。
参数6:输入,flag为算法初始化类中心的方法,KMEANS_PP_CENTERS。
参数7:输出,centers中存放的是算法计算后每个类别的中心坐标,后续代码依据centers画圆。
返回值是聚类完成后的类别紧凑性度量值
另外K-means还可以做图片压缩
代码例子:

  1. // k-means.cpp : 定义控制台应用程序的入口点。
  2. //
  3. #include "stdafx.h"
  4. #include "opencv.hpp"
  5. #include <iostream>
  6. using namespace cv;
  7. using namespace std;
  8. //#define EXAMPLE1
  9. //图片压缩
  10. int main()
  11. {
  12. Mat img = imread("girl.jpg", 1);
  13. Mat samples = img.reshape(0, img.cols*img.rows); // 图像转换成sampleCount行*3通道的矩阵
  14. printf("image : h = %d, w = %d, c = %d\n", img.rows, img.cols, img.channels());
  15. //转换为CV_32FC3浮点型
  16. samples.convertTo(samples, CV_32FC3); // or CV_32F works (too)
  17. printf("samples: h = %d, w = %d, c = %d\n", samples.rows, samples.cols, samples.channels());
  18. //define criteria, number of clusters(K)
  19. TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0);//终止条件
  20. int K = 4; // 聚类类别数:4,8,16
  21. Mat labels; // 聚类结果索引矩阵
  22. Mat centers; // 聚类中心
  23. // 执行kmeans()
  24. double compactness = kmeans(samples, K, labels, criteria, 3, KMEANS_PP_CENTERS, centers);
  25. // 将聚类中心转为int型
  26. centers.convertTo(centers, CV_8UC3);
  27. // 按照聚类结果标签labels,对samples重新分配BGR值
  28. samples.convertTo(samples, CV_8UC3);
  29. cout << endl << "--- centers ---" << endl;
  30. cout << centers.at<Vec3b>(0, 0) << endl;
  31. cout << centers.at<Vec3b>(1, 0) << endl;
  32. cout << centers.at<Vec3b>(2, 0) << endl;
  33. cout << centers.at<Vec3b>(3, 0) << endl;
  34. cout << endl << "--- samples original ---" << endl;
  35. cout << samples.at<Vec3b>(1, 0) << endl; // 8U 类型的 RGB 彩色图像使用 <Vec3b>访问Mat像素值
  36. cout << samples.at<Vec3b>(100, 0) << endl;
  37. cout << samples.at<Vec3b>(1000, 0) << endl;
  38. cout << endl << "--- label ---" << endl;
  39. cout << labels.at<int>(1) << endl; // 8U 类型的 RGB 彩色图像使用 <Vec3b>访问Mat像素值
  40. cout << labels.at<int>(100) << endl;
  41. cout << labels.at<int>(1000) << endl;
  42. // 按label标签重新为samples赋值,实现色彩压缩
  43. for (int i = 0; i < labels.rows; i++)
  44. {
  45. int cluster = labels.at<int>(i);
  46. // Vec3b为OpenCV中CV_8UC3类型的RGB彩色图像数据类型
  47. samples.at<Vec3b>(i, 0) = centers.at<Vec3b>(cluster, 0);
  48. }
  49. cout << endl << "--- samples cluttered ---" << endl;
  50. cout << samples.at<Vec3b>(1, 0) << endl;
  51. cout << samples.at<Vec3b>(100, 0) << endl;
  52. cout << samples.at<Vec3b>(1000, 0) << endl;
  53. // 4.输出/显示聚类结果
  54. // 将samples转回img尺寸
  55. Mat img_out = samples.reshape(0, img.rows); // 图像转换成sampleCount行*3通道的矩阵
  56. cout << "Compactness: " << compactness << endl;
  57. imshow("image", img);
  58. imshow("clusters", img_out);
  59. // 保存图像
  60. stringstream ss;
  61. ss << K;
  62. string str = ss.str();
  63. string image_save_name = str + "_cluter.jpg";
  64. imwrite(image_save_name, img_out);
  65. waitKey();
  66. return 0;
  67. }
  68. #ifdef EXAMPLE1
  69. int main()
  70. {
  71. //分类坐标点
  72. // 1. 初始化参数
  73. const int MAX_CLUSTERS = 5; // 最大类别数
  74. Scalar colorTab[] =
  75. {
  76. Scalar(0, 0, 255),
  77. Scalar(0,255,0),
  78. Scalar(255,100,100),
  79. Scalar(255,0,255),
  80. Scalar(0,255,255)
  81. };
  82. Mat img(500, 500, CV_8UC3); // 新建画布
  83. img = Scalar::all(255); // 将画布设置为白色
  84. RNG rng(12345); //随机数产生器
  85. // 主循环
  86. for (;;)
  87. {
  88. // 初始化类别数
  89. int k, clusterCount = rng.uniform(2, MAX_CLUSTERS + 1); // 在[2, MAX_CLUSTERS + 1)区间,随机生成一个整数
  90. // 初始化样本数
  91. int i, sampleCount = rng.uniform(1, 1001); // 在[1, 1001)区间,随机生成一个整数
  92. Mat points(sampleCount, 1, CV_32FC2); // 输入样本矩阵:sampleCount行*1列,浮点型,2通道
  93. Mat labels; // 聚类结果索引矩阵
  94. clusterCount = MIN(clusterCount, sampleCount); // 聚类类别数<样本数
  95. std::vector<Point2f> centers;
  96. cout << points.at<float>(0,0) << endl;
  97. cout << points.at<float>(10,0) << endl;
  98. cout << "---1---" << endl;
  99. // 2. 随机生成输入样本
  100. /* generate random sample from multigaussian distribution */
  101. for (k = 0; k < clusterCount; k++)
  102. {
  103. Point center;
  104. center.x = rng.uniform(0, img.cols);
  105. center.y = rng.uniform(0, img.rows);
  106. // 对样本points指定行进行赋值
  107. Mat pointChunk = points.rowRange(k*sampleCount / clusterCount,
  108. k == clusterCount - 1 ? sampleCount :
  109. (k + 1)*sampleCount / clusterCount);
  110. cout << points.at<float>(0, 0) << endl;
  111. cout << points.at<float>(10, 0) << endl;
  112. cout << "---2---" << endl;
  113. // rng.fill函数,会以center点为中心,产生高斯分布的随机点(位置点),并把位置点保存在矩阵pointChunk中。
  114. rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
  115. }
  116. //打乱points中的值,
  117. //第二个参数表示随机交换元素的数量的缩放因子,
  118. //总的交换次数dst.rows*dst.cols*iterFactor,
  119. //第三个参数是个随机发生器,决定选那两个元素交换。
  120. cout << points.at<float>(0, 0) << endl;
  121. cout << points.at<float>(10, 0) << endl;
  122. cout <<"---3---" << endl;
  123. randShuffle(points, 1, &rng);
  124. cout << points.at<float>(0, 0) << endl;
  125. cout << points.at<float>(10, 0) << endl;
  126. cout << "---4---" << endl;
  127. // 3. 执行k-means()算法
  128. // 输入:points为输入样本矩阵,每一行为一个样本
  129. // 输入:clusterCount为类别数
  130. // 输出:labels是一个一维矩阵,其size和points一样,存储每个输入样本执行kmeans算法后的类标签,值为0到clusterCount-1
  131. // 输入:TermCriteria()迭代终止条件:
  132. // TermCriteria::COUNT:当迭代达到最大迭代次数时终止,参数为int max_Count=10
  133. // TermCriteria::EPS:当迭代达到期望精度时终止,参数为double epsilon=1.0
  134. // 输出:centers中存放的是kmeans算法结束后每个类别的中心位置
  135. // 返回值:compactness聚类完成后的类别紧凑性度量值
  136. double compactness = kmeans(points, clusterCount, labels,
  137. TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
  138. 3, KMEANS_PP_CENTERS, centers);
  139. // 4.绘制聚类结果
  140. for (i = 0; i < sampleCount; i++)
  141. {
  142. int clusterIdx = labels.at<int>(i);
  143. Point ipt = points.at<Point2f>(i);
  144. circle(img, ipt, 2, colorTab[clusterIdx], FILLED, LINE_AA);
  145. }
  146. for (i = 0; i < (int)centers.size(); ++i)
  147. {
  148. Point2f c = centers[i];
  149. circle(img, c, 40, colorTab[i], 1, LINE_AA);
  150. }
  151. // 5.输出/显示聚类结果
  152. cout << "Compactness: " << compactness << endl;
  153. imshow("clusters", img);
  154. char key = (char)waitKey();
  155. if (key == 27 || key == 'q' || key == 'Q') // 'ESC'
  156. break;
  157. }
  158. return 0;
  159. }
  160. #endif

7.KNN
https://zhuanlan.zhihu.com/p/85636009
KNN是分类算法中最简单的方法之一。为了判断未知样本的类别,以所有已知类别的样本作为参照,计算未知样本与所有已知样本的距离,从中选取与未知样本距离最近的K个已知样本,根据少数服从多数的投票法则,将未知样本与K个最邻近样本中所属类别占比较多的归为一类。
理解:

  • k=3, 存在两个蓝色,一个绿色,则红星属于蓝色类别;
  • k=5, 存在三个绿色,两个蓝色,则红星属于绿色类别;

image.png
k 的取值很关键,K值选得太大易引起欠拟合,太小容易过拟合,需通过交叉验证确定K值。
缺点:1.因为训练样本是存在内存中的,所以需要大量的存储空间,而且需要计算待测样本和训练数据中所有样本的距离,所以非常耗时;
2.对于随机分布的数据效果不好。

  1. // KNN.cpp : 定义控制台应用程序的入口点。
  2. //
  3. #include "stdafx.h"
  4. #include "opencv.hpp"
  5. #include <iostream>
  6. using namespace cv;
  7. using namespace cv::ml;
  8. using namespace std;
  9. //#define CLASSIFIER
  10. #define REGRESSION
  11. // 生成训练集与测试集的函数
  12. void generateDataSet(Mat &img, Mat &trainData, Mat &testData, Mat &trainLabel, Mat &testLabel, int train_rows=4);
  13. #ifdef CLASSIFIER
  14. int main()
  15. {
  16. // 用于分类
  17. // 1.读取原始数据
  18. Mat img = imread("digits.png", 1); // 使用图片格式的MNIST数据集(部分)
  19. cvtColor(img, img, CV_BGR2GRAY);
  20. // 2.制作训练集
  21. // 设置训练集、测试集大小
  22. int train_sample_count = 4000;
  23. int test_sample_count = 1000;
  24. int train_rows = 4; // 每类用于训练的行数,4000/10类/100(样本/行)=4
  25. Mat trainData, testData; // 申明训练集与测试集
  26. Mat trainLabel(train_sample_count, 1, CV_32FC1); // 申明训练集标签
  27. Mat testLabel(test_sample_count, 1, CV_32FC1); // 申明测试集标签
  28. // 生成训练集、测试集与标签
  29. generateDataSet(img, trainData, testData, trainLabel, testLabel/*, train_rows*/);
  30. // 3.创建并初始化KNN模型
  31. cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create(); // 创建knn模型
  32. int K = 5; // 考察的最邻近样本个数
  33. knn->setDefaultK(K);
  34. knn->setIsClassifier(true); // 用于分类
  35. knn->setAlgorithmType(cv::ml::KNearest::BRUTE_FORCE);
  36. // 4.训练
  37. printf("开始训练...\n");
  38. //printf("trainData : rows, cols = %d, %d\n", trainData.rows, trainData.cols);
  39. //printf("trainLabel: rows, cols = %d, %d\n", trainLabel.rows, trainLabel.cols);
  40. knn->train(trainData, cv::ml::ROW_SAMPLE, trainLabel);
  41. printf("训练完成\n\n");
  42. // 5.测试
  43. printf("开始测试...\n");
  44. Mat result;
  45. knn->findNearest(testData, K, result);
  46. //printf("test samples = %d\n", testData.rows);
  47. //printf("result rows = %d\n", result.rows);
  48. // 精度
  49. int count = 0;
  50. for (int i = 0; i < test_sample_count; i++)
  51. {
  52. int predict = int(result.at<float>(i));
  53. int actual = int(testLabel.at<float>(i));
  54. if (predict == actual)
  55. {
  56. printf("label: %d, predict: %d\n", actual, predict);
  57. count++;
  58. }
  59. else
  60. printf("label: %d, predict: %d ×\n", actual, predict);
  61. }
  62. printf("测试完成\n");
  63. // 输出结果
  64. double accuracy = double(count) / double(test_sample_count);
  65. printf("K = %d\n", K);
  66. printf("accuracy = %.4f\n", accuracy);
  67. waitKey();
  68. return 0;
  69. }
  70. #endif
  71. #ifdef REGRESSION
  72. int main()
  73. {
  74. // 用于回归
  75. // 1.读取原始数据
  76. Mat img = imread("digits.png", 1);
  77. cvtColor(img, img, CV_BGR2GRAY);
  78. // 2.制作训练集
  79. // 设置训练集、测试集大小
  80. int train_sample_count = 4000;
  81. int test_sample_count = 1000;
  82. int train_rows = 4; // 每类用于训练的行数,4000/10类/100(样本/行)=4
  83. Mat trainData, testData; // 申明训练集与测试集
  84. Mat trainLabel(train_sample_count, 1, CV_32FC1); // 申明训练集标签
  85. Mat testLabel(test_sample_count, 1, CV_32FC1); // 申明测试集标签
  86. // 生成训练集、测试集与标签
  87. generateDataSet(img, trainData, testData, trainLabel, testLabel/*, train_rows*/);
  88. // 3.创建并初始化KNN模型
  89. cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create(); // 创建knn模型
  90. int K = 5; // 考察的最邻近样本个数
  91. knn->setDefaultK(K);
  92. knn->setIsClassifier(false); // 用于回归
  93. knn->setAlgorithmType(cv::ml::KNearest::BRUTE_FORCE);
  94. // 4.训练
  95. printf("开始训练...\n");
  96. //printf("trainData : rows, cols = %d, %d\n", trainData.rows, trainData.cols);
  97. //printf("trainLabel: rows, cols = %d, %d\n", trainLabel.rows, trainLabel.cols);
  98. knn->train(trainData, cv::ml::ROW_SAMPLE, trainLabel);
  99. printf("训练完成\n\n");
  100. // 5.测试
  101. printf("开始测试...\n");
  102. Mat result;
  103. knn->findNearest(testData, K, result);
  104. //printf("test samples = %d\n", testData.rows);
  105. //printf("result rows = %d\n", result.rows);
  106. // 精度
  107. int t = 0;
  108. int f = 0;
  109. for (int i = 0; i < test_sample_count; i++)
  110. {
  111. int predict = int(result.at<float>(i));
  112. int actual = int(testLabel.at<float>(i));
  113. if (predict == actual)
  114. {
  115. printf("label: %d, predict: %d\n", actual, predict);
  116. t++;
  117. }
  118. else
  119. {
  120. printf("label: %d, predict: %d ×\n", actual, predict);
  121. f++;
  122. }
  123. }
  124. printf("测试完成\n");
  125. // 输出结果
  126. float accuracy = (t * 1.0) / (t + f);
  127. printf("K = %d\n", K);
  128. printf("accuracy = %.4f\n", accuracy);
  129. waitKey();
  130. return 0;
  131. }
  132. #endif
  133. /** @生成模型的训练集与测试集
  134. 参数1:img ,输入,灰度图像,由固定尺寸小图拼接成的大图,不同类别的小图像依次排列
  135. 参数2:trainData ,输出,训练集,维度为:训练样本数 * 单个样本特征数,CV_32FC3类型
  136. 参数3:testData ,输出,测试集,维度为:测试样本数 * 单个样本特征数,CV_32FC3类型
  137. 参数4:trainLabel,输出,训练集标签,维度为:训练样本数 * 1,CV_32FC1类型
  138. 参数4:testLabel ,输出,测试集标签,维度为:测试样本数 * 1,CV_32FC1类型
  139. 参数5:train_rows,输入,用于训练的样本所占行数,默认4行用于训练,1行用于测试
  140. */
  141. void generateDataSet(Mat &img, Mat &trainData, Mat &testData, Mat &trainLabel, Mat &testLabel, int train_rows)
  142. {
  143. // 初始化图像中切片图与其他参数
  144. int width_slice = 20; // 单个数字切片图像的宽度
  145. int height_slice = 20; // 单个数字切片图像的高度
  146. int row_sample = 100; // 每行样本数100幅小图
  147. int col_sample = 50; // 每列样本数50幅小图
  148. int row_single_number = 5; // 单个数字占5行
  149. int test_rows = row_single_number - train_rows; // 测试样本所占行数
  150. Mat trainMat(train_rows * 20 *10, img.cols, CV_8UC1); // 存放所有训练图片
  151. trainMat = Scalar::all(0);
  152. Mat testMat(test_rows * 20 * 10, img.cols, CV_8UC1); // 存放所有测试图片
  153. testMat = Scalar::all(0);
  154. // 生成测试、训练大图
  155. for (int i = 1; i <= 10 ; i++)
  156. {
  157. Mat tempTrainMat = img.rowRange((i - 1) * row_single_number * 20, (i * row_single_number - 1) * 20).clone();
  158. Mat tempTestMat = img.rowRange((i * row_single_number - 1) * 20, (i * row_single_number) * 20).clone();
  159. imshow("temptrain", tempTrainMat);
  160. imshow("temptest", tempTestMat);
  161. //printf("tempTrainMat(w, h) = %d, %d\n", tempTrainMat.cols, tempTrainMat.rows);
  162. //printf("tempTestMat (w, h) = %d, %d\n", tempTestMat.cols, tempTestMat.rows);
  163. // train
  164. cv::Mat roi_train = trainMat(Rect(0, (i - 1) * train_rows * 20, tempTrainMat.cols, tempTrainMat.rows));
  165. Mat mask_train(roi_train.rows, roi_train.cols, roi_train.depth(), Scalar(1));
  166. // test
  167. cv::Mat roi_test = testMat(Rect(0, (i - 1) * test_rows * 20, tempTestMat.cols, tempTestMat.rows));
  168. Mat mask_test(roi_test.rows, roi_test.cols, roi_test.depth(), Scalar(1));
  169. // 提取的训练测试行分别复制到训练图与测试图中
  170. tempTrainMat.copyTo(roi_train, mask_train);
  171. tempTestMat.copyTo(roi_test, mask_test);
  172. //显示效果图
  173. imshow("trainMat", trainMat);
  174. imshow("tesetMat", testMat);
  175. cv::waitKey(10);
  176. }
  177. // 存大图
  178. imwrite("trainMat.jpg", trainMat);
  179. imwrite("testMat.jpg", testMat);
  180. // 生成训练、测试数据
  181. printf("开始生成训练、测试数据...\n");
  182. Rect roi;
  183. for (int i = 1; i <= col_sample; i++) // 50行:1-50行数字图像
  184. {
  185. //printf("第%d行: \n", i);
  186. for (int j = 1; j <= row_sample; j++) // 100列:1-100列数字图像
  187. {
  188. // 第行为训练集
  189. Mat temp_single_num; // 读取一个数字图像
  190. // 关键步骤:当前切片数字的位置区域
  191. roi = Rect((j-1)*width_slice, (i-1)*height_slice, width_slice, height_slice);
  192. temp_single_num = img(roi).clone(); // 注意此处需要使用深拷贝.clone(),后面才能改变切片图的形状,否则roi内存区域不连续
  193. //imshow("slice", temp_single_num);
  194. //waitKey(1);
  195. if (i % 5 != 0)
  196. //{
  197. // 起始行记为1-4,6-9,11-14...46-49行为测试集
  198. // 将单个数字切片拉成向量连续放入Mat容器中
  199. trainData.push_back(temp_single_num.reshape(0, 1));
  200. //}
  201. else
  202. //{ // 起始行记为1,第5,10,15...50行为测试集
  203. testData.push_back(temp_single_num.reshape(0, 1));
  204. //}
  205. }
  206. }
  207. trainData.convertTo(trainData, CV_32FC1);
  208. testData.convertTo(testData, CV_32FC1);
  209. printf("训练、测试数据已生成\n\n");
  210. // 生成标签
  211. printf("开始生成标签数据...\n");
  212. for (int i = 1; i <= 10; i++)
  213. {
  214. // train label
  215. Mat tmep_label_train = Mat::ones(train_rows * row_sample, 1, CV_32FC1); // 临时存放当前标签的矩阵
  216. tmep_label_train = tmep_label_train * (i - 1); // 标签从0开始
  217. Mat temp = trainLabel.rowRange((i - 1)* train_rows * row_sample, i * train_rows * row_sample);
  218. tmep_label_train.copyTo(temp); // 将临时标签复制到trainLabel对应区域,因为浅拷贝,改变temp即改变trainLabel
  219. // test label
  220. Mat tmep_label_test = Mat::ones(test_rows * row_sample, 1, CV_32FC1);
  221. tmep_label_test = tmep_label_test * (i - 1);
  222. temp = testLabel.rowRange((i - 1)* test_rows * row_sample, i * test_rows * row_sample);
  223. tmep_label_test.copyTo(temp);
  224. }
  225. printf("标签数据已生成\n\n");
  226. //printf("trainLabel(1,400,401,800,801,4000) = %f, %f, %f, %f, %f, %f\n", trainLabel.at<float>(0), trainLabel.at<float>(399), trainLabel.at<float>(400), trainLabel.at<float>(799), trainLabel.at<float>(800), trainLabel.at<float>(3999));
  227. //printf("testLabel (1,100,101,200,201,1000) = %f, %f, %f, %f, %f, %f\n", testLabel.at<float>(0), testLabel.at<float>(99), testLabel.at<float>(100), testLabel.at<float>(199), testLabel.at<float>(200), testLabel.at<float>(999));
  228. //cv::waitKey();
  229. }

8.决策树
决策树属于监督学习,既可以做分类也可以做回归。最早出现的是ID3算法,之后改进成了C4.5算法。决策树的核心问题是:自顶向下的各个节点应选择何种属性进行切分,才能获得最好的分类。因此,选择最佳切分属性是决策树的关键所在(意思是如何找到一个最佳属性当作根节点)。
详细介绍:https://www.cnblogs.com/molieren/articles/10664954.html
视频介绍:https://www.bilibili.com/video/BV1Ps411V7px?p=5

  1. // Creating and training a decision tree
  2. #include "stdafx.h"
  3. #include <opencv.hpp>
  4. #include <opencv2/ml/ml.hpp>
  5. #include <iostream>
  6. #define _MUSROOM
  7. //#define _BOSTON
  8. using namespace std;
  9. using namespace cv;
  10. using namespace cv::ml;
  11. #ifdef _BOSTON
  12. int main(int argc, char *argv[]) {
  13. // 1.读取数据
  14. // 1.1 读取训练集
  15. const char *csv_file_name_train = "../boston-house-prices/housing-train.csv";
  16. cv::Ptr<TrainData> dataSetTrain =
  17. TrainData::loadFromCSV(csv_file_name_train, // Input file name
  18. 0, // 从数据文件开头跳过的行数
  19. -1, // 样本标签于此列开始,-1时样本标签为最后一列
  20. -1 // 样本标签于此列结束,-1时为上一个参数所在列
  21. );
  22. // 验证数据读取是否正确
  23. int n_train_samples = dataSetTrain->getNSamples();
  24. if (n_train_samples == 0) {
  25. cerr << "读取文件错误: " << csv_file_name_train << endl;
  26. exit(-1);
  27. }
  28. else {
  29. cout << "从" << csv_file_name_train << "中,读取了" << n_train_samples << "个训练样本" << endl;
  30. }
  31. // 1.2 读取测试集
  32. const char *csv_file_name_test = "../boston-house-prices/housing-test.csv";
  33. cv::Ptr<TrainData> dataSetTest = TrainData::loadFromCSV(csv_file_name_test, 0, -1, -1);
  34. int n_test_samples = dataSetTest->getNSamples();
  35. if (n_test_samples == 0) {
  36. cerr << "读取文件错误: " << csv_file_name_test << endl;
  37. exit(-1);
  38. }
  39. else {
  40. cout << "从" << csv_file_name_test << "中,读取了" << n_test_samples << "个测试样本" << endl;
  41. }
  42. // 2.创建决策树模型
  43. cv::Ptr<RTrees> dtree = RTrees::create();
  44. // 3.设置模型参数
  45. dtree->setMaxDepth(15);//15
  46. dtree->setMinSampleCount(2);//2
  47. dtree->setRegressionAccuracy(0.01f);
  48. dtree->setUseSurrogates(false /* true */);
  49. dtree->setCalculateVarImportance(true); // 开启特征重要性计算
  50. //dtree->setMaxCategories(15);
  51. dtree->setCVFolds(0 /*10*/); //
  52. dtree->setUse1SERule(true/*true*/);
  53. dtree->setTruncatePrunedTree(true);
  54. // 4.训练决策树
  55. cout << "start training..." << endl;
  56. dtree->train(dataSetTrain);
  57. cout << "training success!" << endl;
  58. // 输出样本特征属性重要性
  59. Mat var_importance = dtree->getVarImportance();
  60. if (!var_importance.empty())
  61. {
  62. double rt_imp_sum = sum(var_importance)[0];
  63. printf("var#\timportance (%%):\n");
  64. int i, n = (int)var_importance.total();// 返回矩阵的元素总个数
  65. for (i = 0; i < n; i++)
  66. printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum);
  67. }
  68. // 5. 训练集精度
  69. cv::Mat train_results;
  70. float MSE_train = dtree->calcError(dataSetTrain,
  71. false, // use train data
  72. train_results);
  73. cv::Mat expected_responses = dataSetTrain->getResponses();
  74. int total_train = 0;
  75. float square_error = 0.0;
  76. cout << endl << "--- train set --- " << endl;
  77. for (int i = 0; i < expected_responses.rows; ++i) {
  78. float responses = train_results.at<float>(i, 0);
  79. float expected = expected_responses.at<float>(i, 0);
  80. square_error += (expected - responses) * (expected - responses);
  81. total_train++;
  82. //cout << "price: " << expected << ",\tpredict: " << responses << endl;
  83. cout << expected << "\t" << responses << endl;
  84. }
  85. // 计算RMSE指标
  86. float RMSE_train = sqrt(square_error / total_train);
  87. // 6. 保存模型
  88. dtree->save("trained_dtree.xml");
  89. // 7. 读取模型
  90. dtree->load("trained_dtree.xml");
  91. // 8. 测试集精度
  92. cv::Mat results_test;
  93. float MSE_test = dtree->calcError(dataSetTest,
  94. true, // use train data: now it is test data actually
  95. results_test);
  96. cv::Mat expected_responses_test = dataSetTest->getResponses();
  97. //cout << expected_responses_test.size() << endl;
  98. int total_test = 0;
  99. square_error = 0.0;
  100. cout << endl << "--- test set --- " << endl;
  101. for (int i = 0; i < expected_responses_test.rows; ++i) {
  102. float responses = results_test.at<float>(i, 0);
  103. float expected = expected_responses_test.at<float>(i, 0);
  104. square_error += (expected - responses) * (expected - responses);
  105. total_test++;
  106. //cout << "price: " << expected << ",\tpredict: " << responses << endl;
  107. cout << expected << "\t" << responses << endl;
  108. }
  109. // 计算RMSE指标
  110. float RMSE_test = sqrt(square_error / total_test);
  111. cout << "train data RMSE = " << RMSE_train << " k USD" << endl;
  112. cout << "test data RMSE = " << RMSE_test << " k USD" << endl;
  113. cout << "train data MSE = " << MSE_train << endl;
  114. cout << "test data MSE = " << MSE_test << endl;
  115. system("pause");
  116. return 0;
  117. }
  118. #endif
  119. #ifdef _MUSROOM
  120. int main(int argc, char *argv[]) {
  121. // 1.读取数据
  122. //const char *csv_file_name = argc >= 2 ? argv[1] : "D:/data/ml-data/mushroom/agaricus-lepiota.data";
  123. const char *csv_file_name = argc >= 2 ? argv[1] : "../mushroom/agaricus-lepiota.data";
  124. // 1.1 读取CSV数据文件
  125. // 函数用法...
  126. cv::Ptr<TrainData> dataSet =
  127. TrainData::loadFromCSV(csv_file_name, // Input file name
  128. 0, // 从数据文件开头跳过的行数
  129. 0, // 样本的标签从此列开始
  130. 1, // 样本输入特征向量从此列开始
  131. "cat[0-22]" // All 23 columns are categorical
  132. );
  133. // Use defaults for delimeter (',') and missch ('?')使用默认的“,”分割特征
  134. // 1.2 验证数据读取是否正确
  135. int n_samples = dataSet->getNSamples();
  136. if (n_samples == 0) {
  137. cerr << "读取文件错误: " << csv_file_name << endl;
  138. exit(-1);
  139. }
  140. else {
  141. cout << "从" << csv_file_name << "中,读取了" << n_samples << "个样本" << endl;
  142. }
  143. // 1.3 划分训练集与测试集
  144. dataSet->setTrainTestSplitRatio(0.9, false); //按90%和10%的比例将数据集为训练集和测试集
  145. int n_train_samples = dataSet->getNTrainSamples();
  146. int n_test_samples = dataSet->getNTestSamples();
  147. cout << "Train Samples: " << n_train_samples << endl
  148. << "Test Samples: " << n_test_samples << endl;
  149. // 2.创建决策树模型
  150. cv::Ptr<RTrees> dtree = RTrees::create();
  151. // 3.设置模型参数
  152. // 3.1 设置类别重要性数组
  153. // Set up priors to penalize "poisonous" 10x as much as "edible"
  154. //float _priors[] = { 1.0, 10.0 };
  155. //cv::Mat priors(1, 2, CV_32F, _priors);
  156. dtree->setMaxDepth(10);//10
  157. dtree->setMinSampleCount(10);//10
  158. dtree->setRegressionAccuracy(0.01f);
  159. dtree->setUseSurrogates(false /* true */);
  160. dtree->setMaxCategories(15);
  161. dtree->setCVFolds(1 /*10*/); // nonzero causes core dump
  162. dtree->setUse1SERule(false/*true*/);
  163. dtree->setTruncatePrunedTree(true);
  164. //dtree->setPriors( priors );
  165. dtree->setPriors(cv::Mat()); // ignore priors for now...
  166. // Now train the model
  167. // NB: we are only using the "train" part of the data set
  168. // 4.训练决策树
  169. cout << "start training..." << endl;
  170. dtree->train(dataSet);
  171. cout << "training success." << endl;
  172. // 5.测试
  173. cv::Mat results_train, results_test;
  174. float train_error = dtree->calcError(dataSet, false, results_train);// use training data
  175. float test_error = dtree->calcError(dataSet, true, results_test); // use test data
  176. std::vector<cv::String> names;
  177. dataSet->getNames(names);
  178. Mat flags = dataSet->getVarSymbolFlags();
  179. // 6.统计输出结果
  180. cv::Mat expected_responses = dataSet->getTestResponses();
  181. int t = 0, f = 0, total = 0;
  182. for (int i = 0; i < dataSet->getNTestSamples(); ++i) {
  183. float responses = results_test.at<float>(i, 0);
  184. float expected = expected_responses.at<float>(i, 0);
  185. cv::String r_str = names[(int)responses];
  186. cv::String e_str = names[(int)expected];
  187. if (responses == expected)
  188. {
  189. t++;
  190. cout << "label: " << e_str << ", predict: " << r_str << endl;
  191. }
  192. else
  193. {
  194. f++;
  195. cout << "label: " << e_str << ", predict: " << r_str << " ×" << endl;
  196. }
  197. total++;
  198. }
  199. cout << "Correct answer = " << t << endl;
  200. cout << "Incorrect answer = " << f << endl;
  201. cout << "Total test sample = " << total << endl;
  202. cout << "[Decision Tree] Correct answers : " << (float(t) / total) << ""<< endl;
  203. cout << "[Decision Tree] Incorrect answers: " << (float(f) / total) << "" << endl;
  204. cout << "[Decision Tree] Error on training data: " << train_error << "%" << endl;
  205. cout << "[Decision Tree] Error on test data: " << test_error << "%" << endl;
  206. system("pause");
  207. return 0;
  208. }
  209. #endif