内容参考自 图灵学院 ElasticSearch专栏

项目地址:

直接参考码云地址: https://gitee.com/zjj19941/ZJJ_ElasticSearch.git

直接看 com.baiqi.elasticsearch.service.JobFullTextServiceTest 测试类

pom依赖

  1. <dependency>
  2. <groupId>org.elasticsearch</groupId>
  3. <artifactId>elasticsearch</artifactId>
  4. <version>7.6.1</version>
  5. </dependency>
  6. <!-- ES的高阶的客户端API -->
  7. <dependency>
  8. <groupId>org.elasticsearch.client</groupId>
  9. <artifactId>elasticsearch-rest-high-level-client</artifactId>
  10. <version>7.6.1</version>
  11. </dependency>

连接ElasticSearch索引库

  1. private RestHighLevelClient restHighLevelClient;
  2. // 索引库的名字
  3. private static final String JOB_IDX = "job_index";
  4. /**
  5. * 连接ElasticSearch索引库
  6. */
  7. public JobFullTextServiceImpl() {
  8. // 建立与ES的连接
  9. // 1. 使用RestHighLevelClient构建客户端连接。
  10. // 2. 基于RestClient.builder方法来构建RestClientBuilder
  11. // 3. 用HttpHost来添加ES的节点
  12. RestClientBuilder restClientBuilder = RestClient.builder(
  13. new HttpHost("zjj101", 9200, "http")
  14. , new HttpHost("zjj102", 9200, "http")
  15. , new HttpHost("zjj103", 9200, "http"));
  16. /* RestClientBuilder restClientBuilder = RestClient.builder(
  17. new HttpHost("192.168.21.130", 9200, "http"));*/
  18. restHighLevelClient = new RestHighLevelClient(restClientBuilder);
  19. }

添加数据

  1. /**添加数据
  2. *
  3. * @param jobDetail
  4. * @throws IOException
  5. */
  6. @Override
  7. public void add(JobDetail jobDetail) throws IOException {
  8. //1. 构建IndexRequest对象,用来描述ES发起请求的数据。
  9. IndexRequest indexRequest = new IndexRequest(JOB_IDX);
  10. //2. 设置文档ID。
  11. indexRequest.id(jobDetail.getId() + "");
  12. //3. 使用FastJSON将实体类对象转换为JSON。
  13. String json = JSONObject.toJSONString(jobDetail);
  14. //4. 使用IndexRequest.source方法设置文档数据,并设置请求的数据为JSON格式。
  15. indexRequest.source(json, XContentType.JSON);
  16. //5. 使用ES High level client调用index方法发起请求,将一个文档添加到索引中。
  17. restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
  18. }

根据id查找

  1. /**
  2. * 根据id查找
  3. * @param id
  4. * @return
  5. * @throws IOException
  6. */
  7. @Override
  8. public JobDetail findById(long id) throws IOException {
  9. // 1. 构建GetRequest请求。
  10. GetRequest getRequest = new GetRequest(JOB_IDX, id + "");
  11. // 2. 使用RestHighLevelClient.get发送GetRequest请求,并获取到ES服务器的响应。
  12. GetResponse getResponse = restHighLevelClient.get(getRequest, RequestOptions.DEFAULT);
  13. // 3. 将ES响应的数据转换为JSON字符串 (这个字符串是_source代码块儿里面的数据)
  14. String json = getResponse.getSourceAsString();
  15. // 4. 并使用FastJSON将JSON字符串转换为JobDetail类对象
  16. JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
  17. // 5. 记得:单独设置ID,因为_source里面是没有id这个属性的,如果你需要id这个属性你就得单独设置进去
  18. jobDetail.setId(id);
  19. return jobDetail;
  20. }

更新数据

  1. /**
  2. * 更新数据
  3. * @param jobDetail
  4. * @throws IOException
  5. */
  6. @Override
  7. public void update(JobDetail jobDetail) throws IOException {
  8. // 1. 判断对应ID的文档是否存在
  9. // a) 构建GetRequest
  10. GetRequest getRequest = new GetRequest(JOB_IDX, jobDetail.getId() + "");
  11. // b) 执行client的exists方法,发起请求,判断是否存在
  12. // 为什么要先判断是否存在呢?因为你不判断的话,你直接操作的话,万一这条数据不存在,就会抛异常出来
  13. boolean exists = restHighLevelClient.exists(getRequest, RequestOptions.DEFAULT);
  14. if(exists) {
  15. // 2. 构建UpdateRequest请求
  16. UpdateRequest updateRequest = new UpdateRequest(JOB_IDX, jobDetail.getId() + "");
  17. // 3. 设置UpdateRequest的文档,并配置为JSON格式
  18. updateRequest.doc(JSONObject.toJSONString(jobDetail), XContentType.JSON);
  19. // 4. 执行client发起update请求
  20. restHighLevelClient.update(updateRequest, RequestOptions.DEFAULT);
  21. }
  22. }

根据id删除

  1. /**
  2. * 根据id删除
  3. * @param id
  4. * @throws IOException
  5. */
  6. @Override
  7. public void deleteById(long id) throws IOException {
  8. // 1. 构建delete请求
  9. DeleteRequest deleteRequest = new DeleteRequest(JOB_IDX, id + "");
  10. // 2. 使用RestHighLevelClient执行delete请求
  11. restHighLevelClient.delete(deleteRequest, RequestOptions.DEFAULT);
  12. }

根据关键字搜索

  1. /**
  2. * 根据关键字搜索
  3. * @param keywords
  4. * @return
  5. * @throws IOException
  6. */
  7. @Override
  8. public List<JobDetail> searchByKeywords(String keywords) throws IOException {
  9. // 1.构建SearchRequest检索请求
  10. // 专门用来进行全文检索、关键字检索的API
  11. SearchRequest searchRequest = new SearchRequest(JOB_IDX);
  12. // 2.创建一个SearchSourceBuilder专门用于构建查询条件
  13. SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
  14. // 3.使用QueryBuilders.multiMatchQuery构建一个查询条件(搜索title、jd),并配置到SearchSourceBuilder
  15. MultiMatchQueryBuilder multiMatchQueryBuilder = QueryBuilders.multiMatchQuery(keywords, "title", "jd");
  16. // 将查询条件设置到查询请求构建器中
  17. searchSourceBuilder.query(multiMatchQueryBuilder);
  18. // 4.调用SearchRequest.source将查询条件设置到检索请求
  19. searchRequest.source(searchSourceBuilder);
  20. // 5.执行RestHighLevelClient.search发起请求
  21. SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
  22. SearchHit[] hitArray = searchResponse.getHits().getHits();
  23. // 6.遍历结果
  24. ArrayList<JobDetail> jobDetailArrayList = new ArrayList<>();
  25. for (SearchHit documentFields : hitArray) {
  26. // 1)获取命中的结果
  27. String json = documentFields.getSourceAsString();
  28. // 2)将JSON字符串转换为对象
  29. JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
  30. // 3)使用SearchHit.getId设置文档ID
  31. jobDetail.setId(Long.parseLong(documentFields.getId()));
  32. jobDetailArrayList.add(jobDetail);
  33. }
  34. return jobDetailArrayList;
  35. }

基于 from/size 分页查询

  1. @Override
  2. public Map<String, Object> searchByPage(String keywords, int pageNum, int pageSize) throws IOException {
  3. // 1.构建SearchRequest检索请求
  4. // 专门用来进行全文检索、关键字检索的API
  5. SearchRequest searchRequest = new SearchRequest(JOB_IDX);
  6. // 2.创建一个SearchSourceBuilder专门用于构建查询条件
  7. SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
  8. // 3.使用QueryBuilders.multiMatchQuery构建一个查询条件(搜索title、jd),并配置到SearchSourceBuilder
  9. MultiMatchQueryBuilder multiMatchQueryBuilder = QueryBuilders.multiMatchQuery(keywords, "title", "jd");
  10. // 将查询条件设置到查询请求构建器中
  11. searchSourceBuilder.query(multiMatchQueryBuilder);
  12. // 每页显示多少条
  13. searchSourceBuilder.size(pageSize);
  14. // 设置从第几条开始查询
  15. searchSourceBuilder.from((pageNum - 1) * pageSize);
  16. // 4.调用SearchRequest.source将查询条件设置到检索请求
  17. searchRequest.source(searchSourceBuilder);
  18. // 5.执行RestHighLevelClient.search发起请求
  19. SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
  20. SearchHit[] hitArray = searchResponse.getHits().getHits();
  21. // 6.遍历结果
  22. ArrayList<JobDetail> jobDetailArrayList = new ArrayList<>();
  23. for (SearchHit documentFields : hitArray) {
  24. // 1)获取命中的结果
  25. String json = documentFields.getSourceAsString();
  26. // 2)将JSON字符串转换为对象
  27. JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
  28. // 3)使用SearchHit.getId设置文档ID
  29. jobDetail.setId(Long.parseLong(documentFields.getId()));
  30. jobDetailArrayList.add(jobDetail);
  31. }
  32. // 8. 将结果封装到Map结构中(带有分页信息)
  33. // a) total -> 使用SearchHits.getTotalHits().value获取到所有的记录数
  34. // b) content -> 当前分页中的数据
  35. long totalNum = searchResponse.getHits().getTotalHits().value;
  36. HashMap hashMap = new HashMap();
  37. hashMap.put("total", totalNum);
  38. hashMap.put("content", jobDetailArrayList);
  39. return hashMap;
  40. }

基于 scroll 分页,并对查询结果中命中的关键字做高亮处理

  1. // scroll分页解决深分页问题
  2. @Override
  3. public Map<String, Object> searchByScrollPage(String keywords, String scrollId, int pageSize) throws IOException {
  4. SearchResponse searchResponse = null;
  5. if(scrollId == null) {
  6. // 1.构建SearchRequest检索请求
  7. // 专门用来进行全文检索、关键字检索的API
  8. SearchRequest searchRequest = new SearchRequest(JOB_IDX);
  9. // 2.创建一个SearchSourceBuilder专门用于构建查询条件
  10. SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
  11. // 3.使用QueryBuilders.multiMatchQuery构建一个查询条件(搜索title、jd),并配置到SearchSourceBuilder
  12. MultiMatchQueryBuilder multiMatchQueryBuilder = QueryBuilders.multiMatchQuery(keywords, "title", "jd");
  13. // 将查询条件设置到查询请求构建器中
  14. searchSourceBuilder.query(multiMatchQueryBuilder);
  15. // 设置高亮
  16. HighlightBuilder highlightBuilder = new HighlightBuilder();
  17. highlightBuilder.field("title");
  18. highlightBuilder.field("jd");
  19. highlightBuilder.preTags("<font color='red'>");
  20. highlightBuilder.postTags("</font>");
  21. // 给请求设置高亮
  22. searchSourceBuilder.highlighter(highlightBuilder);
  23. // 每页显示多少条
  24. searchSourceBuilder.size(pageSize);
  25. // 4.调用SearchRequest.source将查询条件设置到检索请求
  26. searchRequest.source(searchSourceBuilder);
  27. //--------------------------
  28. // 设置scroll查询
  29. //--------------------------
  30. searchRequest.scroll(TimeValue.timeValueMinutes(5));
  31. // 5.执行RestHighLevelClient.search发起请求
  32. searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
  33. }
  34. // 第二次查询的时候,直接通过scroll id查询数据
  35. else {
  36. SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId);
  37. searchScrollRequest.scroll(TimeValue.timeValueMinutes(5));
  38. // 使用RestHighLevelClient发送scroll请求
  39. searchResponse = restHighLevelClient.scroll(searchScrollRequest, RequestOptions.DEFAULT);
  40. }
  41. //--------------------------
  42. // 迭代ES响应的数据
  43. //--------------------------
  44. SearchHit[] hitArray = searchResponse.getHits().getHits();
  45. // 6.遍历结果
  46. ArrayList<JobDetail> jobDetailArrayList = new ArrayList<>();
  47. for (SearchHit documentFields : hitArray) {
  48. // 1)获取命中的结果
  49. String json = documentFields.getSourceAsString();
  50. // 2)将JSON字符串转换为对象
  51. JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
  52. // 3)使用SearchHit.getId设置文档ID
  53. jobDetail.setId(Long.parseLong(documentFields.getId()));
  54. jobDetailArrayList.add(jobDetail);
  55. // 设置高亮的一些文本到实体类中
  56. // 封装了高亮
  57. Map<String, HighlightField> highlightFieldMap = documentFields.getHighlightFields();
  58. HighlightField titleHL = highlightFieldMap.get("title");
  59. HighlightField jdHL = highlightFieldMap.get("jd");
  60. if(titleHL != null) {
  61. // 获取指定字段的高亮片段
  62. Text[] fragments = titleHL.getFragments();
  63. // 将这些高亮片段拼接成一个完整的高亮字段
  64. StringBuilder builder = new StringBuilder();
  65. for(Text text : fragments) {
  66. builder.append(text);
  67. }
  68. // 设置到实体类中
  69. jobDetail.setTitle(builder.toString());
  70. }
  71. if(jdHL != null) {
  72. // 获取指定字段的高亮片段
  73. Text[] fragments = jdHL.getFragments();
  74. // 将这些高亮片段拼接成一个完整的高亮字段
  75. StringBuilder builder = new StringBuilder();
  76. for(Text text : fragments) {
  77. builder.append(text);
  78. }
  79. // 设置到实体类中
  80. jobDetail.setJd(builder.toString());
  81. }
  82. }
  83. // 8. 将结果封装到Map结构中(带有分页信息)
  84. // a) total -> 使用SearchHits.getTotalHits().value获取到所有的记录数
  85. // b) content -> 当前分页中的数据
  86. long totalNum = searchResponse.getHits().getTotalHits().value;
  87. HashMap hashMap = new HashMap();
  88. hashMap.put("scroll_id", searchResponse.getScrollId());
  89. hashMap.put("content", jobDetailArrayList);
  90. return hashMap;
  91. }