1. person索引

1.1 插入person数据

  1. PUT person
  2. {
  3. "settings":{
  4. "number_of_shards":20,
  5. "number_of_replicas":0
  6. },
  7. "mappings":{
  8. "properties":{
  9. "addr":{
  10. "type":"text",
  11. "analyzer":"ik_max_word",
  12. "fields":{
  13. "keyword":{
  14. "type":"keyword",
  15. "ignore_above":256
  16. }
  17. }
  18. },
  19. "age":{
  20. "type":"long"
  21. },
  22. "birdthday":{
  23. "type":"date",
  24. "format":"[yyyy-MM-dd]"
  25. },
  26. "color":{
  27. "type":"keyword",
  28. "doc_values": true
  29. },
  30. "create_time":{
  31. "type":"date",
  32. "format":"[yyyy-MM-dd HH:mm:ss]"
  33. },
  34. "height":{
  35. "type":"float"
  36. },
  37. "name":{
  38. "type":"text",
  39. "analyzer":"ik_max_word",
  40. "fields":{
  41. "keyword":{
  42. "type":"keyword",
  43. "ignore_above":256
  44. }
  45. }
  46. },
  47. "sex":{
  48. "type":"keyword",
  49. "doc_values": true
  50. },
  51. "tags":{
  52. "type":"keyword",
  53. "doc_values": true
  54. },
  55. "weight":{
  56. "type":"float",
  57. "doc_values": true
  58. }
  59. }
  60. }
  61. }

使用java插入数据

  1. package com.yxyy.yxpay.utils;
  2. import cn.hutool.core.date.DateField;
  3. import cn.hutool.core.date.DateUtil;
  4. import cn.hutool.core.io.FileUtil;
  5. import cn.hutool.core.util.RandomUtil;
  6. import cn.hutool.http.HttpUtil;
  7. import cn.hutool.json.JSONObject;
  8. import java.io.UnsupportedEncodingException;
  9. import java.math.BigDecimal;
  10. import java.util.*;
  11. /**
  12. * @Title: randomName
  13. * @return String 名字
  14. */
  15. public class RandomName {
  16. //地址列表
  17. public static List<String> addrList = new ArrayList<>();
  18. //标签列表
  19. public static List<String> tagsList = Arrays.asList("郁郁寡欢", "悲观失意", "好吃懒做", "疑神疑鬼", "患得患失", "异想天开", "多愁善感", "狡猾多变", "贪小便宜", "见异思迁", "情绪多变", "脾气暴躁", "重色轻友", "胆小怕事 好吃懒做", "成熟稳重", "幼稚调皮", "温柔体贴", "诚实坦白", "婆婆妈妈", "活泼可爱", "普普通通", "内向害羞", "外向开朗", "心地善良", "聪明伶俐", "善解人意", "风趣幽默", "思想开放", "积极进取", "小心谨慎", "郁郁寡欢", "正义正直", "悲观失意 好吃懒做", "处事洒脱", "疑神疑鬼", "患得患失", "异想天开", "多愁善感", "淡泊名利", "见利忘义", "瞻前顾后", "循规蹈矩", "热心助人", "快言快语", "少言寡语", "爱管闲事", "追求刺激", "豪放不羁", "狡猾多变", "贪小便宜", "见异思迁", "情绪多变", "水性扬花", "重色轻友", "胆小怕事", "积极负责", "勇敢正义", "聪明好学", "实事求是", "务实", "实际", "老实巴交", "圆滑", "老练", "脾气暴躁", "慢条斯理", "冲动", "任性", "神经质", "暴躁", "善变", "难以琢磨", "患得患失", "浮躁", "见异思迁", "莽撞", "易怒", "犹豫不决", "轻率", "善变", "温柔", "内向", "腼腆", "害羞", "多疑", "直率", "活泼", "开朗", "滑稽", "可笑", "古怪", "怪异", "狭窄", "宽容", "猜忌", "多情", "冷淡", "热情", "放荡", "拘谨", "谨慎", "严格", "严厉", "凶残", "残忍", "无情", "懦弱", "怯弱", "卑鄙", "无耻", "下流", "无赖", "好色", "肮脏", "飘逸", "圣洁", "纯洁", "清纯", "可爱", "贤慧", "慈爱", "仁慈", "老实", "木讷", "慷慨", "大方", "随便", "暴躁", "急躁", "尖酸", "刻薄", "侠义", "忠诚", "开朗", "温柔乐观", "健谈", "冲动", "莽撞", "易怒", "情绪低落","善良", "热情", "好客", "孝顺讲义气", "大公无私", "好心肠", "豪放", "爽朗", "爽快", "爽直", "豪爽直爽", "豁达", "小气", "小心眼多心", "外向", "有人缘", "孤僻", "不合群", "好交际兴趣广泛", "仔细", "严于律已宽以待人", "严守秩序有条理", "执着", "较真", "专注", "文质彬彬", "聪明", "迟钝", "冰雪聪明聪明绝世", "聪颖", "出口成章语惊四座", "娓娓而谈", "口若悬河", "才华横溢", "出类拔萃博大精深", "有成就感", "急功近利", "好大喜功", "勤劳", "勇敢", "自信", "坚强有志气", "懒惰", "胸无大志", "胆小怕事", "果敢", "倔强", "挑衅", "信心受挫", 
"意志坚定","人见人爱","冰山美人","环肥燕瘦","粉装玉琢","兰心慧质","衣冠楚楚","冰雪聪明","装潢门面","红颜知己","婀娜多姿","亭亭玉立","我见犹怜","冰肌玉骨","貌美如花","仙姿玉色","信言不美","美若天仙","梨花带雨","仙姿佚貌","浑金璞玉","花枝招展","娇艳欲滴","仙姿玉貌","倾国倾城","城北徐公","楚楚可人","美目盼兮","琪花瑶草","艳压群芳","绝代佳人","肤如凝脂","姹紫嫣红","宛转蛾眉","小鸟依人","闭月羞花","姑射神人","双瞳剪水","傅粉何郎","人淡如菊","美丽动人","美如冠玉","娇小玲珑","尽善尽美","含苞欲放","一表非凡","惠质兰心","仪态万方","人间尤物","美伦美奂","香草美人","白璧无瑕","风华绝代","明艳动人","秀色可餐","文过饰非","仪态万千","小家碧玉","螓首蛾眉","红粉佳人","钟灵毓秀","妍姿艳质","袅袅婷婷","秀外慧中","月里嫦娥","楚楚动人","齿如瓠犀","语笑嫣然","美艳绝伦","温柔可人","出水芙蓉","千娇百媚","林下风气","巧笑倩兮","淡妆浓抹","风姿绰约","秀外惠中","出尘脱俗","窈窕淑女","捧心西子","左家娇女","绰约多姿","朱唇皓齿","一笑千金","芳泽无加","夭桃秾李","活泼可爱","眉清目秀","掷果潘安","外向","善良","开朗","活泼","好动","轻松","愉快","热情","可亲","豁达","稳重","幽默","真诚","豪爽","耿直","成熟","独立","果断","健谈","机敏","深沉","坚强","兴奋","热情","率直","毅力","友爱","风趣","沉静","谨慎","忠诚","友善","严肃","忠心","乐观","坦率","勇敢","自信","自立","沉著","执著","容忍","体贴","满足","积极","有趣","知足","勤劳","和气","无畏","务实","轻浮","冲动","幼稚","自私","依赖","任性","自负","拜金","暴躁","倔强","虚伪","孤僻","刻薄","武断","浮躁","莽撞","易怒","轻率","善变","狡猾","易怒","多疑","懒惰","专横","顽固","猜疑","挑衅","冷漠","虚荣","冷淡","反覆","跋扈","自负","逆反","怨恨","鲁莽","放任","贫乏","固执内向","脆弱","自卑","害羞","敏感","迟钝","柔弱","畏缩","顺从","胆小","安静","寡言","保守","被动","忍让","抑郁","谨慎","胆怯","温和","老实","平和","顺服","含蓄","迁就","羞涩","忸怩","缓慢","乏味","散漫","迟缓","罗嗦","耐性","悲观","消极","拖延","烦躁","妥协","唠叨","好交际","善组织","有韧性","可依赖","规范型","好心肠","善交际","无异议","竞争性","自控性","受尊重","激励性","重秩序","有条理","聆听者","无拘束","领导者","受欢迎","神经质","糊涂虫","有惰性","易兴奋","好批评","不专注","好争吵","无目标","不宽恕","无热忱","易激动","难预测","不合群","不灵活","喜操纵","情绪化","大嗓门","统治欲","强迫性","好表现","猥琐","小气","恶心","邋遢","懒惰","任性","刻薄","贪吃","贪睡","贪玩","阴险","狡诈","无趣","幼稚","小气","冲动","自以为是","眼高手低","好高骛远","虚荣心强","爱吹牛","无赖","无聊","无知","无情","愚昧","小气","愚蠢","憨","笨","傻","小气","贪财","怕死","爱慕虚荣","邋遢","好吃懒做","不劳而获","信口开河","以讹传讹","拈轻怕重","墨守陈规","顽固不化","固执己见","自私自利","唯利是图","忘恩负义","粗心大意","半途而废");
  20. //肤色列表
  21. public static List<String> colors = Arrays.asList("Yellow","White","Black","");
  22. //性别列表
  23. public static List<String> sexList = Arrays.asList("MAN","WOMAN","");
  24. public static void main(String[] args) {
  25. for (int i = 101; i <= 200; i++) {
  26. List<String> datas = getData(0, 200000);
  27. FileUtil.appendLines(datas,"C:\\Users\\1\\Desktop\\json\\100000"+i+".json","UTF-8");
  28. }
  29. }
  30. /**
  31. * 获取定量数据
  32. * @param start
  33. * @param end
  34. * @return
  35. */
  36. public static List<String> getData(int start,int end){
  37. List<String> list = new ArrayList<>();
  38. for (int i = start; i < end; i++) {
  39. JSONObject person = new JSONObject();
  40. person.put("name",randomName(true,3));
  41. person.put("age",RandomUtil.randomInt(10,80));
  42. person.put("weight",RandomUtil.randomBigDecimal(new BigDecimal("40"),new BigDecimal("105")).setScale(2, BigDecimal.ROUND_DOWN));
  43. person.put("color",colors.get(RandomUtil.randomInt(0,colors.size())));
  44. person.put("height",RandomUtil.randomBigDecimal(new BigDecimal("150"),new BigDecimal("210")).setScale(2,BigDecimal.ROUND_DOWN));
  45. person.put("addr",getRandomAddr());
  46. person.put("create_time",RandomUtil.randomDate(new Date(), DateField.HOUR, -700000, 0).toString());
  47. person.put("birdthday", DateUtil.format(RandomUtil.randomDate(new Date(), DateField.HOUR, -700000, 0),"yyyy-MM-dd") .toString());
  48. person.put("sex",getRandomSex());
  49. List tags = new ArrayList();
  50. int tagsSize = RandomUtil.randomInt(1,10);
  51. for (int j = 0; j < tagsSize; j++) {
  52. tags.add(tagsList.get(RandomUtil.randomInt(0,tagsList.size())));
  53. }
  54. person.put("tags",tags);
  55. JSONObject action = new JSONObject().put("index",new JSONObject().put("_index","person").put("_type","_doc"));
  56. list.add(action.toString());
  57. list.add(person.toString());
  58. }
  59. return list;
  60. }
  61. /**
  62. * 获取随机性别
  63. * @return
  64. */
  65. public static String getRandomSex(){
  66. return sexList.get(RandomUtil.randomInt(0,sexList.size()));
  67. }
  68. /**
  69. * 获取随机地址
  70. * @return
  71. */
  72. public static String getRandomAddr(){
  73. return addrList.get(RandomUtil.randomInt(0,addrList.size()));
  74. }
  75. /**方法1*/
  76. public static String getRandomJianHan(int len) {
  77. String randomName = "";
  78. for (int i = 0; i < len; i++) {
  79. String str = null;
  80. int hightPos, lowPos; // 定义高低位
  81. Random random = new Random();
  82. hightPos = (176 + Math.abs(random.nextInt(39))); // 获取高位值
  83. lowPos = (161 + Math.abs(random.nextInt(93))); // 获取低位值
  84. byte[] b = new byte[2];
  85. b[0] = (new Integer(hightPos).byteValue());
  86. b[1] = (new Integer(lowPos).byteValue());
  87. try {
  88. str = new String(b, "GBK"); // 转成中文
  89. } catch (UnsupportedEncodingException ex) {
  90. ex.printStackTrace();
  91. }
  92. randomName += str;
  93. }
  94. return randomName;
  95. }
  96. /**方法2*/
  97. public static String randomName(boolean simple, int len) {
  98. String surName[] = {
  99. "赵","钱","孙","李","周","吴","郑","王","冯","陈","楮","卫","蒋","沈","韩","杨",
  100. "朱","秦","尤","许","何","吕","施","张","孔","曹","严","华","金","魏","陶","姜",
  101. "戚","谢","邹","喻","柏","水","窦","章","云","苏","潘","葛","奚","范","彭","郎",
  102. "鲁","韦","昌","马","苗","凤","花","方","俞","任","袁","柳","酆","鲍","史","唐",
  103. "费","廉","岑","薛","雷","贺","倪","汤","滕","殷","罗","毕","郝","邬","安","常",
  104. "乐","于","时","傅","皮","卞","齐","康","伍","余","元","卜","顾","孟","平","黄",
  105. "和","穆","萧","尹","姚","邵","湛","汪","祁","毛","禹","狄","米","贝","明","臧",
  106. "计","伏","成","戴","谈","宋","茅","庞","熊","纪","舒","屈","项","祝","董","梁",
  107. "杜","阮","蓝","闽","席","季","麻","强","贾","路","娄","危","江","童","颜","郭",
  108. "梅","盛","林","刁","锺","徐","丘","骆","高","夏","蔡","田","樊","胡","凌","霍",
  109. "虞","万","支","柯","昝","管","卢","莫","经","房","裘","缪","干","解","应","宗",
  110. "丁","宣","贲","邓","郁","单","杭","洪","包","诸","左","石","崔","吉","钮","龚",
  111. "程","嵇","邢","滑","裴","陆","荣","翁","荀","羊","於","惠","甄","麹","家","封",
  112. "芮","羿","储","靳","汲","邴","糜","松","井","段","富","巫","乌","焦","巴","弓",
  113. "牧","隗","山","谷","车","侯","宓","蓬","全","郗","班","仰","秋","仲","伊","宫",
  114. "宁","仇","栾","暴","甘","斜","厉","戎","祖","武","符","刘","景","詹","束","龙",
  115. "叶","幸","司","韶","郜","黎","蓟","薄","印","宿","白","怀","蒲","邰","从","鄂",
  116. "索","咸","籍","赖","卓","蔺","屠","蒙","池","乔","阴","郁","胥","能","苍","双",
  117. "闻","莘","党","翟","谭","贡","劳","逄","姬","申","扶","堵","冉","宰","郦","雍",
  118. "郤","璩","桑","桂","濮","牛","寿","通","边","扈","燕","冀","郏","浦","尚","农",
  119. "温","别","庄","晏","柴","瞿","阎","充","慕","连","茹","习","宦","艾","鱼","容",
  120. "向","古","易","慎","戈","廖","庾","终","暨","居","衡","步","都","耿","满","弘",
  121. "匡","国","文","寇","广","禄","阙","东","欧","殳","沃","利","蔚","越","夔","隆",
  122. "师","巩","厍","聂","晁","勾","敖","融","冷","訾","辛","阚","那","简","饶","空",
  123. "曾","毋","沙","乜","养","鞠","须","丰","巢","关","蒯","相","查","后","荆","红",
  124. "游","竺","权","逑","盖","益","桓","公","晋","楚","阎","法","汝","鄢","涂","钦",
  125. "岳","帅","缑","亢","况","后","有","琴","商","牟","佘","佴","伯","赏","墨","哈",
  126. "谯","笪","年","爱","阳","佟"};
  127. String doubleSurName[] = {"万俟","司马","上官","欧阳","夏侯","诸葛","闻人","东方",
  128. "赫连","皇甫","尉迟","公羊","澹台","公冶","宗政","濮阳","淳于","单于","太叔","申屠",
  129. "公孙","仲孙","轩辕","令狐","锺离","宇文","长孙","慕容","鲜于","闾丘","司徒","司空",
  130. "丌官","司寇","仉","督","子车","颛孙","端木","巫马","公西","漆雕","乐正","壤驷","公良",
  131. "拓拔","夹谷","宰父","谷梁","段干","百里","东郭","南门","呼延","归","海","羊舌","微生",
  132. "梁丘","左丘","东门","西门","南宫"};
  133. String[] word = {"一","乙","二","十","丁","厂","七","卜","人","入","八","九","几","儿","了","力","乃","刀","又",
  134. "三","于","干","亏","士","工","土","才","寸","下","大","丈","与","万","上","小","口","巾","山",
  135. "千","乞","川","亿","个","勺","久","凡","及","夕","丸","么","广","亡","门","义","之","尸","弓",
  136. "己","已","子","卫","也","女","飞","刃","习","叉","马","乡","丰","王","井","开","夫","天","无",
  137. "元","专","云","扎","艺","木","五","支","厅","不","太","犬","区","历","尤","友","匹","车","巨",
  138. "牙","屯","比","互","切","瓦","止","少","日","中","冈","贝","内","水","见","午","牛","手","毛",
  139. "气","升","长","仁","什","片","仆","化","仇","币","仍","仅","斤","爪","反","介","父","从","今",
  140. "凶","分","乏","公","仓","月","氏","勿","欠","风","丹","匀","乌","凤","勾","文","六","方","火",
  141. "为","斗","忆","订","计","户","认","心","尺","引","丑","巴","孔","队","办","以","允","予","劝",
  142. "双","书","幻","玉","刊","示","末","未","击","打","巧","正","扑","扒","功","扔","去","甘","世",
  143. "古","节","本","术","可","丙","左","厉","右","石","布","龙","平","灭","轧","东","卡","北","占",
  144. "业","旧","帅","归","且","旦","目","叶","甲","申","叮","电","号","田","由","史","只","央","兄",
  145. "叼","叫","另","叨","叹","四","生","失","禾","丘","付","仗","代","仙","们","仪","白","仔","他",
  146. "斥","瓜","乎","丛","令","用","甩","印","乐","句","匆","册","犯","外","处","冬","鸟","务","包",
  147. "饥","主","市","立","闪","兰","半","汁","汇","头","汉","宁","穴","它","讨","写","让","礼","训",
  148. "必","议","讯","记","永","司","尼","民","出","辽","奶","奴","加","召","皮","边","发","孕","圣",
  149. "对","台","矛","纠","母","幼","丝","式","刑","动","扛","寺","吉","扣","考","托","老","执","巩",
  150. "圾","扩","扫","地","扬","场","耳","共","芒","亚","芝","朽","朴","机","权","过","臣","再","协",
  151. "西","压","厌","在","有","百","存","而","页","匠","夸","夺","灰","达","列","死","成","夹","轨",
  152. "邪","划","迈","毕","至","此","贞","师","尘","尖","劣","光","当","早","吐","吓","虫","曲","团",
  153. "同","吊","吃","因","吸","吗","屿","帆","岁","回","岂","刚","则","肉","网","年","朱","先","丢",
  154. "舌","竹","迁","乔","伟","传","乒","乓","休","伍","伏","优","伐","延","件","任","伤","价","份",
  155. "华","仰","仿","伙","伪","自","血","向","似","后","行","舟","全","会","杀","合","兆","企","众",
  156. "爷","伞","创","肌","朵","杂","危","旬","旨","负","各","名","多","争","色","壮","冲","冰","庄",
  157. "庆","亦","刘","齐","交","次","衣","产","决","充","妄","闭","问","闯","羊","并","关","米","灯",
  158. "州","汗","污","江","池","汤","忙","兴","宇","守","宅","字","安","讲","军","许","论","农","讽",
  159. "设","访","寻","那","迅","尽","导","异","孙","阵","阳","收","阶","阴","防","奸","如","妇","好",
  160. "她","妈","戏","羽","观","欢","买","红","纤","级","约","纪","驰","巡","寿","弄","麦","形","进",
  161. "戒","吞","远","违","运","扶","抚","坛","技","坏","扰","拒","找","批","扯","址","走","抄","坝",
  162. "贡","攻","赤","折","抓","扮","抢","孝","均","抛","投","坟","抗","坑","坊","抖","护","壳","志",
  163. "扭","块","声","把","报","却","劫","芽","花","芹","芬","苍","芳","严","芦","劳","克","苏","杆",
  164. "杠","杜","材","村","杏","极","李","杨","求","更","束","豆","两","丽","医","辰","励","否","还",
  165. "歼","来","连","步","坚","旱","盯","呈","时","吴","助","县","里","呆","园","旷","围","呀","吨",
  166. "足","邮","男","困","吵","串","员","听","吩","吹","呜","吧","吼","别","岗","帐","财","针","钉",
  167. "告","我","乱","利","秃","秀","私","每","兵","估","体","何","但","伸","作","伯","伶","佣","低",
  168. "你","住","位","伴","身","皂","佛","近","彻","役","返","余","希","坐","谷","妥","含","邻","岔",
  169. "肝","肚","肠","龟","免","狂","犹","角","删","条","卵","岛","迎","饭","饮","系","言","冻","状",
  170. "亩","况","床","库","疗","应","冷","这","序","辛","弃","冶","忘","闲","间","闷","判","灶","灿",
  171. "弟","汪","沙","汽","沃","泛","沟","没","沈","沉","怀","忧","快","完","宋","宏","牢","究","穷",
  172. "灾","良","证","启","评","补","初","社","识","诉","诊","词","译","君","灵","即","层","尿","尾",
  173. "迟","局","改","张","忌","际","陆","阿","陈","阻","附","妙","妖","妨","努","忍","劲","鸡","驱",
  174. "纯","纱","纳","纲","驳","纵","纷","纸","纹","纺","驴","纽","奉","玩","环","武","青","责","现",
  175. "表","规","抹","拢","拔","拣","担","坦","押","抽","拐","拖","拍","者","顶","拆","拥","抵","拘",
  176. "势","抱","垃","拉","拦","拌","幸","招","坡","披","拨","择","抬","其","取","苦","若","茂","苹",
  177. "苗","英","范","直","茄","茎","茅","林","枝","杯","柜","析","板","松","枪","构","杰","述","枕",
  178. "丧","或","画","卧","事","刺","枣","雨","卖","矿","码","厕","奔","奇","奋","态","欧","垄","妻",
  179. "轰","顷","转","斩","轮","软","到","非","叔","肯","齿","些","虎","虏","肾","贤","尚","旺","具",
  180. "果","味","昆","国","昌","畅","明","易","昂","典","固","忠","咐","呼","鸣","咏","呢","岸","岩",
  181. "帖","罗","帜","岭","凯","败","贩","购","图","钓","制","知","垂","牧","物","乖","刮","秆","和",
  182. "季","委","佳","侍","供","使","例","版","侄","侦","侧","凭","侨","佩","货","依","的","迫","质",
  183. "欣","征","往","爬","彼","径","所","舍","金","命","斧","爸","采","受","乳","贪","念","贫","肤",
  184. "肺","肢","肿","胀","朋","股","肥","服","胁","周","昏","鱼","兔","狐","忽","狗","备","饰","饱",
  185. "饲","变","京","享","店","夜","庙","府","底","剂","郊","废","净","盲","放","刻","育","闸","闹",
  186. "郑","券","卷","单","炒","炊","炕","炎","炉","沫","浅","法","泄","河","沾","泪","油","泊","沿",
  187. "泡","注","泻","泳","泥","沸","波","泼","泽","治","怖","性","怕","怜","怪","学","宝","宗","定",
  188. "宜","审","宙","官","空","帘","实","试","郎","诗","肩","房","诚","衬","衫","视","话","诞","询",
  189. "该","详","建","肃","录","隶","居","届","刷","屈","弦","承","孟","孤","陕","降","限","妹","姑",
  190. "姐","姓","始","驾","参","艰","线","练","组","细","驶","织","终","驻","驼","绍","经","贯","奏",
  191. "春","帮","珍","玻","毒","型","挂","封","持","项","垮","挎","城","挠","政","赴","赵","挡","挺",
  192. "括","拴","拾","挑","指","垫","挣","挤","拼","挖","按","挥","挪","某","甚","革","荐","巷","带",
  193. "草","茧","茶","荒","茫","荡","荣","故","胡","南","药","标","枯","柄","栋","相","查","柏","柳",
  194. "柱","柿","栏","树","要","咸","威","歪","研","砖","厘","厚","砌","砍","面","耐","耍","牵","残",
  195. "殃","轻","鸦","皆","背","战","点","临","览","竖","省","削","尝","是","盼","眨","哄","显","哑",
  196. "冒","映","星","昨","畏","趴","胃","贵","界","虹","虾","蚁","思","蚂","虽","品","咽","骂","哗",
  197. "咱","响","哈","咬","咳","哪","炭","峡","罚","贱","贴","骨","钞","钟","钢","钥","钩","卸","缸",
  198. "拜","看","矩","怎","牲","选","适","秒","香","种","秋","科","重","复","竿","段","便","俩","贷",
  199. "顺","修","保","促","侮","俭","俗","俘","信","皇","泉","鬼","侵","追","俊","盾","待","律","很",
  200. "须","叙","剑","逃","食","盆","胆","胜","胞","胖","脉","勉","狭","狮","独","狡","狱","狠","贸",
  201. "怨","急","饶","蚀","饺","饼","弯","将","奖","哀","亭","亮","度","迹","庭","疮","疯","疫","疤",
  202. "姿","亲","音","帝","施","闻","阀","阁","差","养","美","姜","叛","送","类","迷","前","首","逆",
  203. "总","炼","炸","炮","烂","剃","洁","洪","洒","浇","浊","洞","测","洗","活","派","洽","染","济",
  204. "洋","洲","浑","浓","津","恒","恢","恰","恼","恨","举","觉","宣","室","宫","宪","突","穿","窃",
  205. "客","冠","语","扁","袄","祖","神","祝","误","诱","说","诵","垦","退","既","屋","昼","费","陡",
  206. "眉","孩","除","险","院","娃","姥","姨","姻","娇","怒","架","贺","盈","勇","怠","柔","垒","绑",
  207. "绒","结","绕","骄","绘","给","络","骆","绝","绞","统","耕","耗","艳","泰","珠","班","素","蚕",
  208. "顽","盏","匪","捞","栽","捕","振","载","赶","起","盐","捎","捏","埋","捉","捆","捐","损","都",
  209. "哲","逝","捡","换","挽","热","恐","壶","挨","耻","耽","恭","莲","莫","荷","获","晋","恶","真",
  210. "框","桂","档","桐","株","桥","桃","格","校","核","样","根","索","哥","速","逗","栗","配","翅",
  211. "辱","唇","夏","础","破","原","套","逐","烈","殊","顾","轿","较","顿","毙","致","柴","桌","虑",
  212. "监","紧","党","晒","眠","晓","鸭","晃","晌","晕","蚊","哨","哭","恩","唤","啊","唉","罢","峰",
  213. "圆","贼","贿","钱","钳","钻","铁","铃","铅","缺","氧","特","牺","造","乘","敌","秤","租","积",
  214. "秧","秩","称","秘","透","笔","笑","笋","债","借","值","倚","倾","倒","倘","俱","倡","候","俯",
  215. "倍","倦","健","臭","射","躬","息","徒","徐","舰","舱","般","航","途","拿","爹","爱","颂","翁",
  216. "脆","脂","胸","胳","脏","胶","脑","狸","狼","逢","留","皱","饿","恋","桨","浆","衰","高","席",
  217. "准","座","脊","症","病","疾","疼","疲","效","离","唐","资","凉","站","剖","竞","部","旁","旅",
  218. "畜","阅","羞","瓶","拳","粉","料","益","兼","烤","烘","烦","烧","烛","烟","递","涛","浙","涝",
  219. "酒","涉","消","浩","海","涂","浴","浮","流","润","浪","浸","涨","烫","涌","悟","悄","悔","悦",
  220. "害","宽","家","宵","宴","宾","窄","容","宰","案","请","朗","诸","读","扇","袜","袖","袍","被",
  221. "祥","课","谁","调","冤","谅","谈","谊","剥","恳","展","剧","屑","弱","陵","陶","陷","陪","娱",
  222. "娘","通","能","难","预","桑","绢","绣","验","继","球","理","捧","堵","描","域","掩","捷","排",
  223. "掉","堆","推","掀","授","教","掏","掠","培","接","控","探","据","掘","职","基","著","勒","黄",
  224. "萌","萝","菌","菜","萄","菊","萍","菠","营","械","梦","梢","梅","检","梳","梯","桶","救","副",
  225. "票","戚","爽","聋","袭","盛","雪","辅","辆","虚","雀","堂","常","匙","晨","睁","眯","眼","悬",
  226. "野","啦","晚","啄","距","跃","略","蛇","累","唱","患","唯","崖","崭","崇","圈","铜","铲","银",
  227. "甜","梨","犁","移","笨","笼","笛","符","第","敏","做","袋","悠","偿","偶","偷","您","售","停",
  228. "偏","假","得","衔","盘","船","斜","盒","鸽","悉","欲","彩","领","脚","脖","脸","脱","象","够",
  229. "猜","猪","猎","猫","猛","馅","馆","凑","减","毫","麻","痒","痕","廊","康","庸","鹿","盗","章",
  230. "竟","商","族","旋","望","率","着","盖","粘","粗","粒","断","剪","兽","清","添","淋","淹","渠",
  231. "渐","混","渔","淘","液","淡","深","婆","梁","渗","情","惜","惭","悼","惧","惕","惊","惨","惯",
  232. "寇","寄","宿","窑","密","谋","谎","祸","谜","逮","敢","屠","弹","随","蛋","隆","隐","婚","婶",
  233. "颈","绩","绪","续","骑","绳","维","绵","绸","绿","琴","斑","替","款","堪","搭","塔","越","趁",
  234. "趋","超","提","堤","博","揭","喜","插","揪","搜","煮","援","裁","搁","搂","搅","握","揉","斯",
  235. "期","欺","联","散","惹","葬","葛","董","葡","敬","葱","落","朝","辜","葵","棒","棋","植","森",
  236. "椅","椒","棵","棍","棉","棚","棕","惠","惑","逼","厨","厦","硬","确","雁","殖","裂","雄","暂",
  237. "雅","辈","悲","紫","辉","敞","赏","掌","晴","暑","最","量","喷","晶","喇","遇","喊","景","践",
  238. "跌","跑","遗","蛙","蛛","蜓","喝","喂","喘","喉","幅","帽","赌","赔","黑","铸","铺","链","销",
  239. "锁","锄","锅","锈","锋","锐","短","智","毯","鹅","剩","稍","程","稀","税","筐","等","筑","策",
  240. "筛","筒","答","筋","筝","傲","傅","牌","堡","集","焦","傍","储","奥","街","惩","御","循","艇",
  241. "舒","番","释","禽","腊","脾","腔","鲁","猾","猴","然","馋","装","蛮","就","痛","童","阔","善",
  242. "羡","普","粪","尊","道","曾","焰","港","湖","渣","湿","温","渴","滑","湾","渡","游","滋","溉",
  243. "愤","慌","惰","愧","愉","慨","割","寒","富","窜","窝","窗","遍","裕","裤","裙","谢","谣","谦",
  244. "属","屡","强","粥","疏","隔","隙","絮","嫂","登","缎","缓","编","骗","缘","瑞","魂","肆","摄",
  245. "摸","填","搏","塌","鼓","摆","携","搬","摇","搞","塘","摊","蒜","勤","鹊","蓝","墓","幕","蓬",
  246. "蓄","蒙","蒸","献","禁","楚","想","槐","榆","楼","概","赖","酬","感","碍","碑","碎","碰","碗",
  247. "碌","雷","零","雾","雹","输","督","龄","鉴","睛","睡","睬","鄙","愚","暖","盟","歇","暗","照",
  248. "跨","跳","跪","路","跟","遣","蛾","蜂","嗓","置","罪","罩","错","锡","锣","锤","锦","键","锯",
  249. "矮","辞","稠","愁","筹","签","简","毁","舅","鼠","催","傻","像","躲","微","愈","遥","腰","腥",
  250. "腹","腾","腿","触","解","酱","痰","廉","新","韵","意","粮","数","煎","塑","慈","煤","煌","满",
  251. "漠","源","滤","滥","滔","溪","溜","滚","滨","粱","滩","慎","誉","塞","谨","福","群","殿","辟",
  252. "障","嫌","嫁","叠","缝","缠","静","碧","璃","墙","撇","嘉","摧","截","誓","境","摘","摔","聚",
  253. "蔽","慕","暮","蔑","模","榴","榜","榨","歌","遭","酷","酿","酸","磁","愿","需","弊","裳","颗",
  254. "嗽","蜻","蜡","蝇","蜘","赚","锹","锻","舞","稳","算","箩","管","僚","鼻","魄","貌","膜","膊",
  255. "膀","鲜","疑","馒","裹","敲","豪","膏","遮","腐","瘦","辣","竭","端","旗","精","歉","熄","熔",
  256. "漆","漂","漫","滴","演","漏","慢","寨","赛","察","蜜","谱","嫩","翠","熊","凳","骡","缩","慧",
  257. "撕","撒","趣","趟","撑","播","撞","撤","增","聪","鞋","蕉","蔬","横","槽","樱","橡","飘","醋",
  258. "醉","震","霉","瞒","题","暴","瞎","影","踢","踏","踩","踪","蝶","蝴","嘱","墨","镇","靠","稻",
  259. "黎","稿","稼","箱","箭","篇","僵","躺","僻","德","艘","膝","膛","熟","摩","颜","毅","糊","遵",
  260. "潜","潮","懂","额","慰","劈","操","燕","薯","薪","薄","颠","橘","整","融","醒","餐","嘴","蹄",
  261. "器","赠","默","镜","赞","篮","邀","衡","膨","雕","磨","凝","辨","辩","糖","糕","燃","澡","激",
  262. "懒","壁","避","缴","戴","擦","鞠","藏","霜","霞","瞧","蹈","螺","穗","繁","辫","赢","糟","糠",
  263. "燥","臂","翼","骤","鞭","覆","蹦","镰","翻","鹰","警","攀","蹲","颤","瓣","爆","疆","壤","耀",
  264. "躁","嚼","嚷","籍","魔","灌","蠢","霸","露","囊","罐"};
  265. int surNameLen = surName.length;
  266. int doubleSurNameLen = doubleSurName.length;
  267. int wordLen = word.length;
  268. StringBuffer sb = new StringBuffer();
  269. Random random = new Random();
  270. if(simple){
  271. sb.append(surName[random.nextInt(surNameLen)]);
  272. int surLen = sb.toString().length();
  273. for (int i = 0; i < len - surLen; i++) {
  274. if(sb.toString().length() <= len){
  275. sb.append(word[random.nextInt(wordLen)]);
  276. }
  277. }
  278. }else{
  279. sb.append(doubleSurName[random.nextInt(doubleSurNameLen)]);
  280. int doubleSurLen = sb.toString().length();
  281. for (int i = 0; i < len - doubleSurLen; i++) {
  282. if(sb.toString().length() <= len){
  283. sb.append(word[random.nextInt(wordLen)]);
  284. }
  285. }
  286. }
  287. return sb.toString();
  288. }
  289. //初始化地址列表
  290. static {
  291. String string = HttpUtil.downloadString("https://res.zrbx.com/2021041723095713UY.txt", "UTF-8");
  292. String[] addrs = string.split("\\r\\n");
  293. for (String addr : addrs) {
  294. addrList.add(addr);
  295. }
  296. }
  297. }

controller

  1. package com.yxyy.yxpay;
  2. import cn.hutool.http.HttpResponse;
  3. import cn.hutool.http.HttpUtil;
  4. import cn.hutool.json.JSONUtil;
  5. import com.yxyy.yxpay.utils.RandomName;
  6. import lombok.extern.slf4j.Slf4j;
  7. import org.springframework.boot.SpringApplication;
  8. import org.springframework.boot.autoconfigure.SpringBootApplication;
  9. import org.springframework.web.bind.annotation.GetMapping;
  10. import org.springframework.web.bind.annotation.PathVariable;
  11. import org.springframework.web.bind.annotation.RestController;
  12. import java.io.PrintWriter;
  13. import java.io.StringWriter;
  14. import java.util.List;
  15. import java.util.StringJoiner;
  16. @RestController
  17. @Slf4j
  18. public class YxpayApplication {
  19. @GetMapping("/test/{size}")
  20. public String index1(@PathVariable Integer size){
  21. for (int i = 0; i < size; i++) {
  22. try {
  23. StringJoiner stringJoiner = new StringJoiner(System.lineSeparator(),"",System.lineSeparator());
  24. List<String> data = RandomName.getData(0, 100000);
  25. for (String datum : data) {
  26. stringJoiner.add(datum);
  27. }
  28. String url = "http://localhost:9201/_bulk";
  29. log.info("=================_bulk================");
  30. log.info("_bulk请求地址:"+url);
  31. log.info("_bulk请求内容長度:"+stringJoiner.toString().length());
  32. HttpResponse response = HttpUtil.createPost(url).header("Content-Type", "application/json").body(stringJoiner.toString()).execute();
  33. log.info("_bulk响应内容:"+JSONUtil.parseObj(response.body()).getBool("errors"));
  34. log.info("");
  35. }catch (Exception e){
  36. StringWriter stringWriter = new StringWriter();
  37. PrintWriter printWriter = new PrintWriter(stringWriter);
  38. e.printStackTrace(printWriter);
  39. log.error(stringWriter.toString());
  40. }
  41. }
  42. return "这里是壹心支付服务";
  43. }
  44. }

1.2 基础统计:总数、平均值、分组统计。

  1. #获取2020-05-01的人,并按照tags分组,求每个tag的总数,平均年龄,总体重
  2. GET person/_search?size=0
  3. {
  4. "query": {
  5. "bool": {
  6. "filter": [
  7. {
  8. "range": {
  9. "birdthday": {
  10. "format": "yyyy-MM-dd",
  11. "gte": "2020-05-01",
  12. "lte": "2020-05-01"
  13. }
  14. }
  15. }
  16. ]
  17. }
  18. },
  19. "aggs": {
  20. "按照标签分组": {
  21. "terms": {
  22. "field": "tags.keyword"
  23. },
  24. "aggs": {
  25. "标签的总数": {
  26. "value_count": {
  27. "field": "tags.keyword"
  28. }
  29. },
  30. "平均年龄":{
  31. "avg": {
  32. "field": "age"
  33. }
  34. },
  35. "总体重":{
  36. "sum": {
  37. "field": "weight"
  38. }
  39. }
  40. }
  41. }
  42. }
  43. }

2. histogram

2.1 avg、max、min、value_count、sum、分组

  1. GET person/_search?size=0
  2. {
  3. "aggs": {
  4. "avg_height": { //平均身高
  5. "avg": {
  6. "field": "height"
  7. }
  8. },
  9. "sum_age": { //总年龄
  10. "sum": {
  11. "field": "age"
  12. }
  13. },
  14. "count_id":{ //总数
  15. "value_count": {
  16. "field": "age"
  17. }
  18. },
  19. "max_age":{ //最大年龄
  20. "max": {
  21. "field": "age"
  22. }
  23. },
  24. "min_age":{ //最小年龄
  25. "min": {
  26. "field": "age"
  27. }
  28. },
  29. "aggs_group":{ //按照tags分组并统计每个组的最大体重和总量
  30. "terms": {
  31. "field": "tags.keyword"
  32. },
  33. "aggs": {
  34. "group_count": {
  35. "value_count": {
  36. "field": "age"
  37. }
  38. },
  39. "max_age":{
  40. "max": {
  41. "field": "weight"
  42. }
  43. }
  44. }
  45. },
  46. "group_name":{ //按照name分组并取每个组的总量
  47. "terms": {
  48. "field": "name.keyword"
  49. },
  50. "aggs": {
  51. "group_name_": {
  52. "value_count": {
  53. "field": "age"
  54. }
  55. }
  56. }
  57. },
  58. "createtime_group":{ //按照创建时间分组并取每个组的平均年龄
  59. "terms": {
  60. "field": "create_time"
  61. },
  62. "aggs": {
  63. "group_create": {
  64. "date_range": {
  65. "field": "create_time",
  66. "format": "yyyy-MM-dd",
  67. "ranges": [
  68. { "from": "1900-01-01", "to": "2000-01-01" },
  69. { "from": "2000-01-01", "to": "now" }
  70. ]
  71. },
  72. "aggs": {
  73. "group_create_avg_age": {
  74. "avg": {
  75. "field": "age"
  76. }
  77. }
  78. }
  79. }
  80. }
  81. }
  82. }
  83. }

2.1 range

先使用filter过滤0-40岁的人,再按照每10岁一组分组,计算每个组的平均年龄

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "age": {
  9. "gte": 0,
  10. "lte": 39
  11. }
  12. }
  13. }
  14. ]
  15. }
  16. },
  17. "aggs": {
  18. "age_step10_group": {
  19. "range": {
  20. "field": "age",
  21. "ranges": [
  22. {
  23. "from": 0,
  24. "to": 10
  25. },
  26. {
  27. "from": 10,
  28. "to": 20
  29. },
  30. {
  31. "from": 20,
  32. "to": 30
  33. },
  34. {
  35. "from": 30,
  36. "to": 40
  37. }
  38. ]
  39. }
  40. },
  41. "aggs_avg":{
  42. "avg": {
  43. "field": "age"
  44. }
  45. }
  46. }
  47. }

2.2 histogram

先使用filter过滤0-40岁的人,再使用。年龄按每10岁一组分组统计,并计算每组的平均年龄

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "age": {
  9. "gte": 0,
  10. "lte": 39
  11. }
  12. }
  13. }
  14. ]
  15. }
  16. },
  17. "aggs": {
  18. "test_histogram": {
  19. "histogram": {
  20. "field": "age",
  21. "interval": 10,
  22. "min_doc_count": 0
  23. },
  24. "aggs": {
  25. "test_group": {
  26. "avg": {
  27. "field": "age",
  28. "missing": 10
  29. }
  30. }
  31. }
  32. }
  33. }
  34. }

2.3 constant_score

不计算评分使用filter过滤0-40岁的人,再使用histogram。年龄按每10岁一组分组统计,并计算每组的平均年龄

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "constant_score": {
  5. "filter": {
  6. "range": {
  7. "age": {
  8. "gte": 0,
  9. "lte": 39
  10. }
  11. }
  12. },
  13. "boost": 1.2
  14. }
  15. },
  16. "aggs": {
  17. "test_histogram": {
  18. "histogram": {
  19. "field": "age",
  20. "interval": 10,
  21. "keyed": true, //
  22. "missing": 0,
  23. "min_doc_count": 0
  24. },
  25. "aggs": {
  26. "test_group": {
  27. "avg": {
  28. "field": "age",
  29. "missing": 10
  30. }
  31. }
  32. }
  33. }
  34. }
  35. }

2.4 排序order

将40-45的人按照年龄分组,并按照每组的人数排序。

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "age": {
  9. "gte": 40,
  10. "lte": 45
  11. }
  12. }
  13. }
  14. ]
  15. }
  16. },
  17. "aggs": {
  18. "color_group":{
  19. "terms": {
  20. "field": "age",
  21. "order": {
  22. "_count": "asc"
  23. }
  24. }
  25. }
  26. }
  27. }

排序模式的枚举值为:**_count**、**_term**、**_key**

  • **_count**:按文档数排序。对 terms、histogram、date_histogram 有效。
  • **_term**:按词项的字符串值的字母顺序排序。只在 terms 内使用。
  • **_key**:按每个桶的键值数值排序(理论上与 _term 类似)。只在 histogram、date_histogram 内使用。

2.4 histogram

过滤生日在 2020-05-01 到 2020-10-01 的人。再按照年龄每10人一组,求每组的doc数量和平均年龄。使用histogram

  1. #histogram
  2. GET person/_search?size=0
  3. {
  4. "query": {
  5. "bool": {
  6. "filter": [
  7. {
  8. "range": {
  9. "birdthday": {
  10. "format": "yyyy-MM-dd",
  11. "gte": "2020-05-01",
  12. "lte": "2020-10-01"
  13. }
  14. }
  15. }
  16. ]
  17. }
  18. },
  19. "aggs": {
  20. "aggs_histogram": {
  21. "histogram": {
  22. "field": "age",
  23. "interval": 10,
  24. "min_doc_count": 0
  25. },
  26. "aggs": {
  27. "sum_doc": {
  28. "sum": {
  29. "field": "age"
  30. }
  31. },
  32. "avg_age":{
  33. "avg": {
  34. "field": "age"
  35. }
  36. }
  37. }
  38. }
  39. }
  40. }

2.5 date_histogram

过滤生日在 2020-05-01 - 2020-10-01 的人。再按照生日每月分组,求每月的doc数量和平均年龄。

  1. #date_histogram
  2. GET person/_search?size=0
  3. {
  4. "query": {
  5. "bool": {
  6. "filter": [
  7. {
  8. "range": {
  9. "birdthday": {
  10. "format": "yyyy-MM-dd",
  11. "gte": "2020-05-01",
  12. "lte": "2020-10-01"
  13. }
  14. }
  15. }
  16. ]
  17. }
  18. },
  19. "aggs": {
  20. "aggs_histogram": {
  21. "date_histogram": {
  22. "field": "birdthday",
  23. "format": "yyyy-MM",
  24. "interval": "month", //可选参数: month/day/hour/minute/quarter/second/week/year/2D(2天)/1H(1小时)
  25. "min_doc_count": 0
  26. },
  27. "aggs": {
  28. "sum_doc": {
  29. "sum": {
  30. "field": "age"
  31. }
  32. },
  33. "avg_age":{
  34. "avg": {
  35. "field": "age"
  36. }
  37. }
  38. }
  39. }
  40. }
  41. }

不使用filter,改用extended_bounds

  1. GET person/_search?size=0
  2. {
  3. "aggs": {
  4. "aggs_date_histogram": {
  5. "date_histogram": {
  6. "field": "birdthday",
  7. "interval": 10,
  8. "format": "yyyy-MM",
  9. "min_doc_count": 0,
  10. "extended_bounds": {
  11. "min": "2020-05",
  12. "max": "2020-10"
  13. }
  14. },
  15. "aggs": {
  16. "sum_doc": {
  17. "sum": {
  18. "field": "age"
  19. }
  20. },
  21. "avg_age":{
  22. "avg": {
  23. "field": "age"
  24. }
  25. }
  26. }
  27. }
  28. }
  29. }
  • date_histogram的字段必须是**date**类型。
  • interval在更高版本将会被删除,可替代的参数是:fixed_interval、calendar_interval。需要注意的是如果使用calendar_interval的话min_doc_count要设置为0,不然统计会不精准。

2.6 auto_date_histogram

过滤生日在 2020-05-01 - 2020-10-01 的人。再按照生日分组,不指定分组规则,只规定分10个组,es会动态判断我们的需求,做出正确的分组。

  1. #auto_date_histogram
  2. GET person/_search?size=0
  3. {
  4. "query": {
  5. "bool": {
  6. "filter": [
  7. {
  8. "range": {
  9. "birdthday": {
  10. "format": "yyyy-MM-dd",
  11. "gte": "2020-05-01",
  12. "lte": "2020-10-01"
  13. }
  14. }
  15. }
  16. ]
  17. }
  18. },
  19. "aggs": {
  20. "aggs_auto_date_histogram": {
  21. "auto_date_histogram": {
  22. "field": "birdthday",
  23. "format": "yyyy-MM-dd",
  24. "buckets":10,
  25. "min_doc_count": 0
  26. },
  27. "aggs": {
  28. "sum_doc": {
  29. "sum": {
  30. "field": "age"
  31. }
  32. },
  33. "avg_age":{
  34. "avg": {
  35. "field": "age"
  36. }
  37. }
  38. }
  39. }
  40. }
  41. }

2.7 cumulative_sum

cumulative_sum是累计函数,它会把每个结果累计计算。
过滤生日在2020-09-01和2020-10-01之间的人,并以每10天一组分组,计算总数,平均年龄以及累计总数。

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "birdthday": {
  9. "format": "yyyy-MM-dd",
  10. "gte": "2020-09-01",
  11. "lte": "2020-10-01"
  12. }
  13. }
  14. }
  15. ]
  16. }
  17. },
  18. "aggs": {
  19. "aggs_date_histogram": {
  20. "date_histogram": {
  21. "field": "birdthday",
  22. "interval": "10D",
  23. "min_doc_count": 0
  24. },
  25. "aggs": {
  26. "总数": {
  27. "value_count": {
  28. "field": "age"
  29. }
  30. },
  31. "avg_age":{
  32. "avg": {
  33. "field": "age"
  34. }
  35. },
  36. "累计总数":{
  37. "cumulative_sum": {
  38. "buckets_path": "总数"
  39. }
  40. }
  41. }
  42. }
  43. }
  44. }

2.8 多层聚合搜索(下钻)

过滤生日在2020-09-01和2020-10-01之间的人,并按照标签分组,每个标签组内再次按照年龄分组,计算每个标签组内的每个年龄组的doc数量。

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "birdthday": {
  9. "format": "yyyy-MM-dd",
  10. "gte": "2009-01-01",
  11. "lte": "2010-01-01"
  12. }
  13. }
  14. }
  15. ]
  16. }
  17. },
  18. "aggs": {
  19. "按照标签分组": {
  20. "terms": {
  21. "field": "tags.keyword"
  22. },
  23. "aggs": {
  24. "按照肤色分组": {
  25. "terms": {
  26. "field": "age"
  27. },
  28. "aggs":{
  29. "doc总量":{
  30. "value_count": {
  31. "field": "age"
  32. }
  33. }
  34. }
  35. }
  36. }
  37. }
  38. }
  39. }

3. missing

missing会在搜索时把值为null的字段设置为指定值
查询生日在2009-01-01到2010-01-01的人数,按照年份分组,统计每年总数。

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "birdthday": {
  9. "format": "yyyy-MM-dd",
  10. "gte": "2009-01-01",
  11. "lte": "2010-01-01"
  12. }
  13. }
  14. }
  15. ]
  16. }
  17. },
  18. "aggs": {
  19. "aggs_date_histogram": {
  20. "date_histogram": {
  21. "field": "birdthday",
  22. "interval": "year",
  23. "min_doc_count": 0,
  24. "missing": "1940-09-09"
  25. },
  26. "aggs": {
  27. "总数": {
  28. "value_count": {
  29. "field": "age"
  30. }
  31. }
  32. }
  33. }
  34. }
  35. }
  36. #结果
  37. [
  38. {
  39. "key_as_string":"2009-01-01",
  40. "key":1230768000000,
  41. "doc_count":510486,
  42. "总数":{
  43. "value":510486
  44. }
  45. },
  46. {
  47. "key_as_string":"2010-01-01",
  48. "key":1262304000000,
  49. "doc_count":1398,
  50. "总数":{
  51. "value":1398
  52. }
  53. }
  54. ]
  55. #修改2009年的一个数据
  56. PUT person/_doc/B3T16HgBwlsip8eIiMts
  57. {
  58. //"birdthday" : "2009-11-04",
  59. "color" : "Yellow",
  60. "create_time" : "2011-10-08 22:08:12",
  61. "name" : "锺务张",
  62. "weight" : 90.41,
  63. "addr" : "山东省东营市东营区西四路174号",
  64. "age" : 76,
  65. "height" : 184.85,
  66. "tags" : [
  67. "粗心大意",
  68. "一表非凡",
  69. "拜金",
  70. "易怒",
  71. "喜操纵",
  72. "善变"
  73. ]
  74. }
  75. }
  76. #再次查询的结果
  77. [
  78. {
  79. "key_as_string":"2009-01-01",
  80. "key":1230768000000,
  81. "doc_count":510485,
  82. "总数":{
  83. "value":510485
  84. }
  85. },
  86. {
  87. "key_as_string":"2010-01-01",
  88. "key":1262304000000,
  89. "doc_count":1398,
  90. "总数":{
  91. "value":1398
  92. }
  93. }
  94. ]
  • 大部分聚合搜索都支持missing
  • missing有自己单独的聚合搜索
    1. #查询birdthday为空值的_doc
    2. GET person/_search?size=0
    3. {
    4. "aggs": {
    5. "查询birdthday为空值的_doc": {
    6. "missing": {
    7. "field": "birdthday"
    8. }
    9. }
    10. }
    11. }

    4. stats、string_stats、extended_stats

    stats是最常用的统计的整合查询,看下面的QueryDSL。
    按照生日过滤后,查询身高字段的常用统计数据。 ```json

    身高最常用的查询

    GET person/_search?size=0 { “aggs”: { “test_height_stats”: {
    1. "stats": {
    2. "field": "height"
    3. }
    } } }

结果

{ “took” : 80, “timed_out” : false, “_shards” : { “total” : 10, “successful” : 10, “skipped” : 0, “failed” : 0 }, “hits” : { “total” : { “value” : 10000, “relation” : “gte” }, “max_score” : null, “hits” : [ ] }, “aggregations” : { “test_height_stats” : { “count” : 511883, “min” : 150.0, “max” : 209.99000549316406, “avg” : 180.0088688679315, “sum” : 9.214347982272339E7 } } }

  1. `stats`只支持精准类型的搜索,如果想要文本类型的搜索,需要把`stats`替换为`string_stats`。<br />`extended_stats`是常见的查询的总结
  2. ```json
  3. #extended_stats查询height字段
  4. GET person/_search?size=0
  5. {
  6. "aggs": {
  7. "test_height_stats": {
  8. "extended_stats": {
  9. "field": "height"
  10. }
  11. }
  12. }
  13. }
  14. #结果
  15. {
  16. "took" : 3497,
  17. "timed_out" : false,
  18. "_shards" : {
  19. "total" : 20,
  20. "successful" : 20,
  21. "skipped" : 0,
  22. "failed" : 0
  23. },
  24. "hits" : {
  25. "total" : {
  26. "value" : 10000,
  27. "relation" : "gte"
  28. },
  29. "max_score" : null,
  30. "hits" : [ ]
  31. },
  32. "aggregations" : {
  33. "test_height_stats" : {
  34. "count" : 49600000,
  35. "min" : 150.0,
  36. "max" : 209.99000549316406,
  37. "avg" : 179.99486968326323,
  38. "sum" : 8.927745536289856E9,
  39. "sum_of_squares" : 1.6218301904987834E12,
  40. "variance" : 300.03621227733487,
  41. "variance_population" : 300.03621227733487,
  42. "variance_sampling" : 300.0362183264522,
  43. "std_deviation" : 17.321553402548364,
  44. "std_deviation_population" : 17.321553402548364,
  45. "std_deviation_sampling" : 17.321553577160802,
  46. "std_deviation_bounds" : {
  47. "upper" : 214.63797648835995,
  48. "lower" : 145.3517628781665,
  49. "upper_population" : 214.63797648835995,
  50. "lower_population" : 145.3517628781665,
  51. "upper_sampling" : 214.63797683758483,
  52. "lower_sampling" : 145.35176252894163
  53. }
  54. }
  55. }
  56. }

“熵(entropy)”是字段内每个项出现的概率

  1. #熵show_distribution
  2. GET person/_search?size=0
  3. {
  4. "aggs": {
  5. "test_string_stats": {
  6. "string_stats": {
  7. "field": "name.keyword",
  8. "show_distribution":true
  9. }
  10. }
  11. }
  12. }
  13. #结果
  14. {
  15. "took" : 26168,
  16. "timed_out" : false,
  17. "_shards" : {
  18. "total" : 20,
  19. "successful" : 20,
  20. "skipped" : 0,
  21. "failed" : 0
  22. },
  23. "hits" : {
  24. "total" : {
  25. "value" : 10000,
  26. "relation" : "gte"
  27. },
  28. "max_score" : null,
  29. "hits" : [ ]
  30. },
  31. "aggregations" : {
  32. "test_string_stats" : {
  33. "count" : 49600000,
  34. "min_length" : 3,
  35. "max_length" : 3,
  36. "avg_length" : 3.0,
  37. "entropy" : 11.124187802116122, //熵值
  38. "distribution" : {
  39. "后" : 0.0017931922043010753,
  40. "阎" : 0.0015224731182795698,
  41. "郁" : 0.0015212365591397848,
  42. "从" : 0.0010349932795698925,
  43. "万" : 0.0010343413978494624,
  44. "姜" : 0.0010343077956989247,
  45. "乔" : 0.0010342540322580646,
  46. "富" : 0.0010332123655913978,
  47. "融" : 0.0010330040322580646,
  48. "吉" : 0.0010329233870967742,
  49. "容" : 0.0010323387096774193,
  50. "祝" : 0.0010322983870967742,
  51. "商" : 0.0010321975806451614,
  52. "花" : 0.0010320564516129033,
  53. "权" : 0.0010320362903225807,
  54. "伏" : 0.0010319422043010753,
  55. "相" : 0.0010318817204301076,
  56. "经" : 0.0010318615591397848,
  57. "孔" : 0.0010317674731182796,
  58. "路" : 0.0010317540322580645,
  59. "贡" : 0.001031633064516129,
  60. "丁" : 0.001031606182795699,
  61. "杨" : 0.0010315793010752688,
  62. "唐" : 0.0010315658602150538,
  63. //.....太多了
  64. }
  65. }
  66. }
  67. }

5. 多bucket排序

按照生日过滤后按照标签分组,每个标签组内再次按照年龄分组,计算每个标签组内的每个年龄组的doc数量。排序顺序为:**group_color>stats.sum**

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "filter": [
  6. {
  7. "range": {
  8. "birdthday": {
  9. "format": "yyyy-MM-dd",
  10. "gte": "2009-01-01",
  11. "lte": "2010-01-01"
  12. }
  13. }
  14. }
  15. ]
  16. }
  17. },
  18. "aggs": {
  19. "按照标签分组": {
  20. "terms": {
  21. "field": "tags",
  22. "order": {
  23. "group_color>stats.sum": "asc"
  24. }
  25. },
  26. "aggs": {
  27. "group_color": {
  28. "filter": {
  29. "term": {
  30. "color.keyword": "Yellow"
  31. }
  32. },
  33. "aggs":{
  34. "stats": {
  35. "extended_stats": {
  36. "field": "age"
  37. }
  38. }
  39. }
  40. }
  41. }
  42. }
  43. }
  44. }

6. cardinality 去重

  1. GET person/_search?size=0
  2. {
  3. "aggs": {
  4. "索引的doc总数量":{
  5. "value_count": {
  6. "field": "age"
  7. }
  8. },
  9. "根据年龄去重后的数量": {
  10. "cardinality": {
  11. "field": "age"
  12. }
  13. },
  14. "根据生日去重后的数量": {
  15. "cardinality": {
  16. "field": "birdthday"
  17. }
  18. },
  19. "根据tags去重后的总数量":{
  20. "cardinality": {
  21. "field": "tags"
  22. }
  23. }
  24. }
  25. }

precision_threshold是以**牺牲内存**的方式控制去重的精度,es默认是1%-6%的精度损失,设置precision_threshold可以有效的控制精度损失,precision_threshold的控制范围是0-40000,默认为3000, 值越大精度越高,值越小精度越低。

  1. GET person/_search?size=0
  2. {
  3. "aggs": {
  4. "索引的doc总数量":{
  5. "value_count": {
  6. "field": "age"
  7. }
  8. },
  9. "根据年龄去重后的数量": {
  10. "cardinality": {
  11. "field": "age"
  12. }
  13. },
  14. "根据生日去重后的数量": {
  15. "cardinality": {
  16. "field": "birdthday",
  17. "precision_threshold":3000 //精度控制
  18. }
  19. },
  20. "根据tags去重后的总数量":{
  21. "cardinality": {
  22. "field": "tags",
  23. "precision_threshold":40000 //精度控制
  24. }
  25. },
  26. "使用脚本根据年龄和生日组合去重":{
  27. "cardinality": {
  28. "script": {
  29. "lang": "painless",
  30. "source": "doc['age'].value+'-'+doc['sex'].value"
  31. }
  32. }
  33. }
  34. }
  35. }

内存使用的计算为: memory = precision_threshold * 8

假设:precision_threshold = 3000; 则使用的memory的大小为 24000Byte≈ 23.44KB。看似很小,但是如果请求过多的话,内存也是扛不住的。一般这个字段就使用3000的默认值,不用刻意去设置。

何时调整precision_threshold? 当数据量很大并且重复项很多的时候可以把这个字段调整大一点,这样性能会有显著提升,比如:性别去重、肤色去重、年龄去重等等。

  1. #4600W数据,性别去重,
  2. #不使用precision_threshold耗时
  3. {
  4. "took" : 5679,
  5. }
  6. #使用precision_threshold=40000 耗时
  7. {
  8. "took" : 5690,
  9. }
  10. # What??没提升??

es内部使用HyperLogLog++算法去重,它会动态地为查询的field生成哈希值。所以field的取值越多,哈希值越多,越占用内存,但是比如性别等字段,不管多少doc,它的值只有2个,所以只会生成2个哈希值。

几乎所有的aggs都支持script

7. top hits aggs

查找年龄是13岁的,按照肤色分组,按照组内元素数量排序取前2组, 组内元素按照体重倒序排序取前2个元素。

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "must": [
  6. {
  7. "constant_score": {
  8. "filter": {
  9. "term": {
  10. "age": "13"
  11. }
  12. }
  13. }
  14. }
  15. ]
  16. }
  17. },
  18. "aggs": {
  19. "按照肤色分组,按照组内元素数量排序取前2组": {
  20. "terms": {
  21. "field": "color",
  22. "size": 2,
  23. "order": {
  24. "_count": "asc"
  25. }
  26. },
  27. "aggs": {
  28. "组内元素按照体重倒叙排序取前2个元素": {
  29. "top_hits": {
  30. "size": 2,
  31. "sort":[
  32. {
  33. "height": "desc"
  34. }
  35. ]
  36. }
  37. }
  38. }
  39. }
  40. }
  41. }

8. filters

查找年龄是12岁、13岁、14岁的人,按照年龄分组,求每组内的平均体重。
方式一:常规查询

  1. #方法一:常规过滤后统计
  2. GET person/_search?size=0
  3. {
  4. "query": {
  5. "bool": {
  6. "must": [
  7. {
  8. "constant_score": {
  9. "filter": {
  10. "terms": {
  11. "age": [ "12", "13", "14" ]
  12. }
  13. }
  14. }
  15. }
  16. ]
  17. }
  18. },
  19. "aggs": {
  20. "按照年龄分组": {
  21. "terms": {
  22. "field": "age"
  23. },
  24. "aggs": {
  25. "每个分组内的平均体重": {
  26. "top_hits": {
  27. "size": 2,
  28. "sort":[
  29. {
  30. "height": "desc"
  31. }
  32. ]
  33. }
  34. }
  35. }
  36. }
  37. }
  38. }
  39. #方式二:使用filters
  40. GET person/_search?size=0
  41. {
  42. "aggs": {
  43. "filters分组": {
  44. "filters": {
  45. "other_bucket": true,
  46. "filters": {
  47. "年龄12统计总数": {
  48. "term": {
  49. "age": "12"
  50. }
  51. },
  52. "年龄13统计总数": {
  53. "term": {
  54. "age": "13"
  55. }
  56. },
  57. "年龄13或14统计总数": {
  58. "terms": {
  59. "age": ["13","14"]
  60. }
  61. },
  62. "sex是MAN的统计": {
  63. "terms": {
  64. "sex": ["MAN"]
  65. }
  66. }
  67. }
  68. }
  69. }
  70. }
  71. }

9. median_absolute_deviation绝对中位差

假设有一列数:[1, 3, 1, 4, 5, 2, 1]。 绝对中位差会对这列数进行如下运算:

  1. 原始数组:[1, 3, 1, 4, 5, 2, 1]
  2. 排序:[1, 1, 1, 2, 3, 4, 5]
  3. 取中间数,如果中间是2个数则取平均值:2
  4. 把中间数与排序后的数相减,获得数组:[1, 1, 1, 0, -1, -2, -3]
  5. 取绝对值,获得数组:[1, 1, 1, 0, 1, 2, 3]
  6. 排序后取中间数,获得最终结果:1

经过运算后,[1, 3, 1, 4, 5, 2, 1]的绝对中位差是1。

10. meta

meta就是给查询结果添加一些数据。

  1. GET person/_search?size=0
  2. {
  3. "query": {
  4. "bool": {
  5. "must": [
  6. {
  7. "constant_score": {
  8. "filter": {
  9. "terms": {
  10. "age": [ "12", "13", "14" ]
  11. }
  12. }
  13. }
  14. }
  15. ]
  16. }
  17. },
  18. "aggs": {
  19. "年龄分组": {
  20. "terms": {
  21. "field": "age"
  22. },
  23. "meta":{
  24. "自定义字段":"自定义字段值"
  25. },
  26. "aggs": {
  27. "取每组的平均值": {
  28. "avg": {
  29. "field": "age"
  30. }
  31. }
  32. }
  33. }
  34. }
  35. }
  36. #结果
  37. {
  38. "took" : 343,
  39. "timed_out" : false,
  40. "_shards" : {
  41. "total" : 20,
  42. "successful" : 20,
  43. "skipped" : 0,
  44. "failed" : 0
  45. },
  46. "hits" : {
  47. "total" : {
  48. "value" : 10000,
  49. "relation" : "gte"
  50. },
  51. "max_score" : null,
  52. "hits" : [ ]
  53. },
  54. "aggregations" : {
  55. "年龄分组" : {
  56. "meta" : {
  57. "自定义字段" : "自定义字段值"
  58. },
  59. "doc_count_error_upper_bound" : 0,
  60. "sum_other_doc_count" : 0,
  61. "buckets" : [
  62. {
  63. "key" : 12,
  64. "doc_count" : 709319,
  65. "取每组的平均值" : {
  66. "value" : 12.0
  67. }
  68. },
  69. {
  70. "key" : 13,
  71. "doc_count" : 708819,
  72. "取每组的平均值" : {
  73. "value" : 13.0
  74. }
  75. },
  76. {
  77. "key" : 14,
  78. "doc_count" : 708080,
  79. "取每组的平均值" : {
  80. "value" : 14.0
  81. }
  82. }
  83. ]
  84. }
  85. }
  86. }

11. 深度优先和广度优先

假设我们要按标签分组,每个标签一个桶,按每个标签桶的doc数量排序后找出最大的两个桶,然后再按照性别分组,求出每组的平均年龄。使用聚合是非常简单的:

  1. GET person/_search?size=0
  2. {
  3. "aggs": {
  4. "按照标签分组": {
  5. "terms": {
  6. "field": "tags",
  7. "size": 2,
  8. "order": {
  9. "_key": "desc"
  10. },
  11. "collect_mode": "depth_first"
  12. },
  13. "aggs": {
  14. "按照性别分组":{
  15. "terms": {
  16. "field": "sex",
  17. "size": 2
  18. },
  19. "aggs": {
  20. "平均年龄": {
  21. "avg": {
  22. "field": "age"
  23. }
  24. }
  25. }
  26. }
  27. }
  28. }
  29. }
  30. }

这看起来是一个简单的聚合查询,最终只返回 4条数据!但是, 这个看上去简单的查询可以轻而易举地消耗大量内存,我们可以通过在内存中构建一个树来查看这个 terms 聚合。

“按照标签分组”的聚合会构建树的第一层,每个标签都有一个桶。然后,内套在第一层的每个节点之下, “按照性别分组”聚合会构建第二层,每个性别一个桶。这意味着每个标签都会生成2个桶,如果有1W个标签的话,会生成2W个桶。

Elasticsearch 允许我们改变聚合的 集合模式 ,就是为了应对这种状况。 我们之前展示的策略叫做 **深度优先 **,它是默认设置, 先构建完整的树,然后修剪无用节点。 深度优先 的方式对于大多数聚合都能正常工作,但对于这样例子的情形就不太适用。

为了应对这些特殊的应用场景,我们应该使用另一种集合策略叫做 **广度优先** 。这种策略的工作方式有些不同,它先执行第一层聚合, 再继续下一层聚合之前会先做修剪。

在我们的示例中, “按照标签分组”聚合会首先执行,在这个时候,我们的树只有一层,但我们已经知道了前 2个最大的桶!这就没有必要保留其他的桶的信息,因为它们无论如何都不会出现在前2位中。因为我们已经知道了前2个最大的标签,我们可以安全的修剪其他节点。修剪后,下一层是基于它的 执行模式读入的,重复执行这个过程直到聚合完成。要使用广度优先,只需简单 的通过参数 collect_mode开启:

  1. GET person/_search?size=0
  2. {
  3. "aggs": {
  4. "按照标签分组": {
  5. "terms": {
  6. "field": "tags",
  7. "size": 2,
  8. "order": {
  9. "_key": "desc"
  10. },
  11. "collect_mode": "breadth_first"
  12. },
  13. "aggs": {
  14. "按照性别分组":{
  15. "terms": {
  16. "field": "sex",
  17. "size": 2
  18. },
  19. "aggs": {
  20. "平均年龄": {
  21. "avg": {
  22. "field": "age"
  23. }
  24. }
  25. }
  26. }
  27. }
  28. }
  29. }
  30. }
  • **深度优先**是先把所有doc构建完整的terms树,再进行聚合等操作。
  • **广度优先**是先进行第一层数据筛选,筛选完成之后在进行第二层筛选,这样一层一层筛选下去的数据会越来越少,越来越快。
  • collect_mode默认是"depth_first"

广度优先仅仅适用于每个组的聚合数量远远小于当前总组数的情况下,因为广度优先会在内存中缓存裁剪后的仅仅需要缓存的每个组的所有数据,以便于它的子聚合分组查询可以复用上级聚合的数据

广度优先的内存使用情况与裁剪后的缓存分组数据量是成线性的。对于很多聚合来说,每个桶内的文档数量是相当大的。
想象一种按月分组的直方图,总组数肯定是固定的,因为每年只有12个月,这个时候每个月下的数据量可能非常大。这使广度优先不是一个好的选择,这也是为什么深度优先作为默认策略的原因。

针对上面的例子,如果数据量越大,那么默认的使用深度优先的聚合模式生成的总分组数就会非常多,但是预估二级的聚合字段分组后的数据量相比总的分组数会小很多所以这种情况下使用广度优先的模式能大大节省内存,从而通过优化聚合模式来大大提高了在某些特定场景下聚合查询的成功率。

12. 邻接矩阵