Project address:
See the Gitee repository: https://gitee.com/zjj19941/ZJJ_ElasticSearch.git
Go straight to the com.baiqi.elasticsearch.service.JobFullTextServiceTest test class.
pom dependencies
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch</artifactId>
    <version>7.6.1</version>
</dependency>
<!-- ES High Level REST Client API -->
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-high-level-client</artifactId>
    <version>7.6.1</version>
</dependency>
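Note: the service code below also uses FastJSON (com.alibaba.fastjson.JSONObject) to convert between entities and JSON. If it is not already on the classpath, a dependency along these lines is needed; the version shown here is an assumption, check the repository's pom for the one actually used:

<!-- FastJSON, used below for JSON (de)serialization; the version here is an assumption -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.62</version>
</dependency>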
Connecting to the Elasticsearch index
private RestHighLevelClient restHighLevelClient;

// Name of the index
private static final String JOB_IDX = "job_index";

/**
 * Connect to the Elasticsearch index
 */
public JobFullTextServiceImpl() {
    // Establish the connection to ES
    // 1. Build the client connection with RestHighLevelClient.
    // 2. Build a RestClientBuilder via RestClient.builder.
    // 3. Add the ES nodes with HttpHost.
    RestClientBuilder restClientBuilder = RestClient.builder(
            new HttpHost("zjj101", 9200, "http"),
            new HttpHost("zjj102", 9200, "http"),
            new HttpHost("zjj103", 9200, "http"));
    /* RestClientBuilder restClientBuilder = RestClient.builder(
            new HttpHost("192.168.21.130", 9200, "http")); */
    restHighLevelClient = new RestHighLevelClient(restClientBuilder);
}
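RestHighLevelClient holds open connections, so it should be closed once the service is no longer needed. A minimal sketch of such a method, added here for illustration (it is not part of the original service):

/**
 * Release the connection to ES; RestHighLevelClient implements Closeable.
 * (Illustrative only, not part of the original service.)
 */
public void close() throws IOException {
    if (restHighLevelClient != null) {
        restHighLevelClient.close();
    }
}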
Adding data
/**
 * Add data
 *
 * @param jobDetail
 * @throws IOException
 */
@Override
public void add(JobDetail jobDetail) throws IOException {
    // 1. Build an IndexRequest object describing the data sent to ES.
    IndexRequest indexRequest = new IndexRequest(JOB_IDX);
    // 2. Set the document ID.
    indexRequest.id(jobDetail.getId() + "");
    // 3. Convert the entity object to JSON with FastJSON.
    String json = JSONObject.toJSONString(jobDetail);
    // 4. Set the document data with IndexRequest.source and declare the payload as JSON.
    indexRequest.source(json, XContentType.JSON);
    // 5. Call index on the ES High Level client to add the document to the index.
    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
}
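A minimal usage sketch, roughly what the JobFullTextServiceTest test class would do; the jobFullTextService variable and the field values are assumptions, only the setters already used elsewhere in this article are relied on:

// Illustrative only: build an entity and index it
JobDetail jobDetail = new JobDetail();
jobDetail.setId(1L);
jobDetail.setTitle("Java developer");
jobDetail.setJd("Backend development; Elasticsearch experience preferred");
jobFullTextService.add(jobDetail);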
Finding by id
/**
 * Find by id
 *
 * @param id
 * @return
 * @throws IOException
 */
@Override
public JobDetail findById(long id) throws IOException {
    // 1. Build the GetRequest.
    GetRequest getRequest = new GetRequest(JOB_IDX, id + "");
    // 2. Send the GetRequest with RestHighLevelClient.get and obtain the ES response.
    GetResponse getResponse = restHighLevelClient.get(getRequest, RequestOptions.DEFAULT);
    // 3. Convert the ES response into a JSON string (this is the data inside the _source block).
    String json = getResponse.getSourceAsString();
    // 4. Convert the JSON string into a JobDetail object with FastJSON.
    JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
    // 5. Remember: set the ID separately, because _source does not contain the id property;
    //    if you need it, you have to set it yourself.
    jobDetail.setId(id);
    return jobDetail;
}
Updating data
/**
 * Update data
 *
 * @param jobDetail
 * @throws IOException
 */
@Override
public void update(JobDetail jobDetail) throws IOException {
    // 1. Check whether a document with this ID exists
    // a) Build the GetRequest
    GetRequest getRequest = new GetRequest(JOB_IDX, jobDetail.getId() + "");
    // b) Call the client's exists method to check for existence.
    //    Why check first? If you update directly and the document does not exist, an exception is thrown.
    boolean exists = restHighLevelClient.exists(getRequest, RequestOptions.DEFAULT);
    if (exists) {
        // 2. Build the UpdateRequest
        UpdateRequest updateRequest = new UpdateRequest(JOB_IDX, jobDetail.getId() + "");
        // 3. Set the document of the UpdateRequest, declared as JSON
        updateRequest.doc(JSONObject.toJSONString(jobDetail), XContentType.JSON);
        // 4. Let the client send the update request
        restHighLevelClient.update(updateRequest, RequestOptions.DEFAULT);
    }
}
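As an alternative to the explicit exists check, the update request can be told to create the document when it is missing. A hedged sketch of that variant using UpdateRequest.docAsUpsert; whether it fits depends on the semantics you want (the original code deliberately skips missing documents):

// Illustrative variant: update, or create the document if it does not exist yet
UpdateRequest upsertRequest = new UpdateRequest(JOB_IDX, jobDetail.getId() + "");
upsertRequest.doc(JSONObject.toJSONString(jobDetail), XContentType.JSON);
upsertRequest.docAsUpsert(true);  // use the doc itself as the upsert document
restHighLevelClient.update(upsertRequest, RequestOptions.DEFAULT);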
Deleting by id
/**
 * Delete by id
 *
 * @param id
 * @throws IOException
 */
@Override
public void deleteById(long id) throws IOException {
    // 1. Build the delete request
    DeleteRequest deleteRequest = new DeleteRequest(JOB_IDX, id + "");
    // 2. Execute the delete request with RestHighLevelClient
    restHighLevelClient.delete(deleteRequest, RequestOptions.DEFAULT);
}
Searching by keyword
/**
 * Search by keyword
 *
 * @param keywords
 * @return
 * @throws IOException
 */
@Override
public List<JobDetail> searchByKeywords(String keywords) throws IOException {
    // 1. Build the SearchRequest, the API dedicated to full-text / keyword search
    SearchRequest searchRequest = new SearchRequest(JOB_IDX);
    // 2. Create a SearchSourceBuilder dedicated to building the query conditions
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
    // 3. Build a query with QueryBuilders.multiMatchQuery (searching title and jd) and put it into the SearchSourceBuilder
    MultiMatchQueryBuilder multiMatchQueryBuilder = QueryBuilders.multiMatchQuery(keywords, "title", "jd");
    // Set the query condition on the search source builder
    searchSourceBuilder.query(multiMatchQueryBuilder);
    // 4. Call SearchRequest.source to attach the query conditions to the search request
    searchRequest.source(searchSourceBuilder);
    // 5. Execute the request with RestHighLevelClient.search
    SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
    SearchHit[] hitArray = searchResponse.getHits().getHits();
    // 6. Iterate over the results
    ArrayList<JobDetail> jobDetailArrayList = new ArrayList<>();
    for (SearchHit documentFields : hitArray) {
        // 1) Get the hit's source
        String json = documentFields.getSourceAsString();
        // 2) Convert the JSON string into an object
        JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
        // 3) Set the document ID from SearchHit.getId
        jobDetail.setId(Long.parseLong(documentFields.getId()));
        jobDetailArrayList.add(jobDetail);
    }
    return jobDetailArrayList;
}
Paged queries based on from/size
@Override
public Map<String, Object> searchByPage(String keywords, int pageNum, int pageSize) throws IOException {
    // 1. Build the SearchRequest, the API dedicated to full-text / keyword search
    SearchRequest searchRequest = new SearchRequest(JOB_IDX);
    // 2. Create a SearchSourceBuilder dedicated to building the query conditions
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
    // 3. Build a query with QueryBuilders.multiMatchQuery (searching title and jd) and put it into the SearchSourceBuilder
    MultiMatchQueryBuilder multiMatchQueryBuilder = QueryBuilders.multiMatchQuery(keywords, "title", "jd");
    // Set the query condition on the search source builder
    searchSourceBuilder.query(multiMatchQueryBuilder);
    // How many hits per page
    searchSourceBuilder.size(pageSize);
    // From which hit to start
    searchSourceBuilder.from((pageNum - 1) * pageSize);
    // 4. Call SearchRequest.source to attach the query conditions to the search request
    searchRequest.source(searchSourceBuilder);
    // 5. Execute the request with RestHighLevelClient.search
    SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
    SearchHit[] hitArray = searchResponse.getHits().getHits();
    // 6. Iterate over the results
    ArrayList<JobDetail> jobDetailArrayList = new ArrayList<>();
    for (SearchHit documentFields : hitArray) {
        // 1) Get the hit's source
        String json = documentFields.getSourceAsString();
        // 2) Convert the JSON string into an object
        JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
        // 3) Set the document ID from SearchHit.getId
        jobDetail.setId(Long.parseLong(documentFields.getId()));
        jobDetailArrayList.add(jobDetail);
    }
    // 7. Wrap the results into a Map (with paging info)
    // a) total   -> total number of matching records, from SearchHits.getTotalHits().value
    // b) content -> the data of the current page
    long totalNum = searchResponse.getHits().getTotalHits().value;
    HashMap<String, Object> hashMap = new HashMap<>();
    hashMap.put("total", totalNum);
    hashMap.put("content", jobDetailArrayList);
    return hashMap;
}
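Called from the test class, the returned map can be unpacked like this; a sketch only, the variable names are assumptions:

// Illustrative only: fetch page 1 with 10 hits per page
Map<String, Object> result = jobFullTextService.searchByPage("java", 1, 10);
System.out.println("total hits: " + result.get("total"));
List<JobDetail> content = (List<JobDetail>) result.get("content");
content.forEach(System.out::println);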
Scroll-based paging, with keyword highlighting in the query results
// Scroll-based paging, to solve the deep-paging problem
@Override
public Map<String, Object> searchByScrollPage(String keywords, String scrollId, int pageSize) throws IOException {
    SearchResponse searchResponse = null;
    if (scrollId == null) {
        // 1. Build the SearchRequest, the API dedicated to full-text / keyword search
        SearchRequest searchRequest = new SearchRequest(JOB_IDX);
        // 2. Create a SearchSourceBuilder dedicated to building the query conditions
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        // 3. Build a query with QueryBuilders.multiMatchQuery (searching title and jd) and put it into the SearchSourceBuilder
        MultiMatchQueryBuilder multiMatchQueryBuilder = QueryBuilders.multiMatchQuery(keywords, "title", "jd");
        // Set the query condition on the search source builder
        searchSourceBuilder.query(multiMatchQueryBuilder);
        // Configure highlighting
        HighlightBuilder highlightBuilder = new HighlightBuilder();
        highlightBuilder.field("title");
        highlightBuilder.field("jd");
        highlightBuilder.preTags("<font color='red'>");
        highlightBuilder.postTags("</font>");
        // Attach the highlighter to the request
        searchSourceBuilder.highlighter(highlightBuilder);
        // How many hits per page
        searchSourceBuilder.size(pageSize);
        // 4. Call SearchRequest.source to attach the query conditions to the search request
        searchRequest.source(searchSourceBuilder);
        //--------------------------
        // Enable scroll on the query
        //--------------------------
        searchRequest.scroll(TimeValue.timeValueMinutes(5));
        // 5. Execute the request with RestHighLevelClient.search
        searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
    }
    // From the second query onwards, fetch data directly via the scroll id
    else {
        SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId);
        searchScrollRequest.scroll(TimeValue.timeValueMinutes(5));
        // Send the scroll request with RestHighLevelClient
        searchResponse = restHighLevelClient.scroll(searchScrollRequest, RequestOptions.DEFAULT);
    }
    //--------------------------
    // Iterate over the data returned by ES
    //--------------------------
    SearchHit[] hitArray = searchResponse.getHits().getHits();
    // 6. Iterate over the results
    ArrayList<JobDetail> jobDetailArrayList = new ArrayList<>();
    for (SearchHit documentFields : hitArray) {
        // 1) Get the hit's source
        String json = documentFields.getSourceAsString();
        // 2) Convert the JSON string into an object
        JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
        // 3) Set the document ID from SearchHit.getId
        jobDetail.setId(Long.parseLong(documentFields.getId()));
        jobDetailArrayList.add(jobDetail);
        // Copy the highlighted fragments into the entity
        Map<String, HighlightField> highlightFieldMap = documentFields.getHighlightFields();
        HighlightField titleHL = highlightFieldMap.get("title");
        HighlightField jdHL = highlightFieldMap.get("jd");
        if (titleHL != null) {
            // Get the highlighted fragments of this field
            Text[] fragments = titleHL.getFragments();
            // Concatenate the fragments into one complete highlighted value
            StringBuilder builder = new StringBuilder();
            for (Text text : fragments) {
                builder.append(text);
            }
            // Set it on the entity
            jobDetail.setTitle(builder.toString());
        }
        if (jdHL != null) {
            // Get the highlighted fragments of this field
            Text[] fragments = jdHL.getFragments();
            // Concatenate the fragments into one complete highlighted value
            StringBuilder builder = new StringBuilder();
            for (Text text : fragments) {
                builder.append(text);
            }
            // Set it on the entity
            jobDetail.setJd(builder.toString());
        }
    }
    // 7. Wrap the results into a Map (with paging info)
    // a) total     -> total number of matching records, from SearchHits.getTotalHits().value
    // b) scroll_id -> the id needed to fetch the next page
    // c) content   -> the data of the current page
    long totalNum = searchResponse.getHits().getTotalHits().value;
    HashMap<String, Object> hashMap = new HashMap<>();
    hashMap.put("total", totalNum);
    hashMap.put("scroll_id", searchResponse.getScrollId());
    hashMap.put("content", jobDetailArrayList);
    return hashMap;
}
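A scroll context keeps resources on the server until it expires (5 minutes here), so once all pages have been consumed it is good practice to release it explicitly. A minimal sketch using the client's standard ClearScrollRequest API; this call is not part of the original code:

// Illustrative only: release the scroll context when paging is finished
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(scrollId);
ClearScrollResponse clearScrollResponse =
        restHighLevelClient.clearScroll(clearScrollRequest, RequestOptions.DEFAULT);
System.out.println("scroll context released: " + clearScrollResponse.isSucceeded());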
