Doing the aggregation and the sorting in a single pass

Let's take a look at the Reducer class we extend.

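The gist of Reducer.run() is roughly the following (a simplified sketch of the Hadoop source; details such as the value-iterator backup-store reset are omitted):

```java
// simplified sketch of org.apache.hadoop.mapreduce.Reducer.run()
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    try {
        // one reduce() call per key group
        while (context.nextKey()) {
            reduce(context.getCurrentKey(), context.getValues(), context);
        }
    } finally {
        // called exactly once, after the last group has been reduced
        cleanup(context);
    }
}
```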

The Reducer is driven by its run() method: run() not only calls reduce() for every key group, it also calls cleanup() once at the very end.
Map output keeps being fed to the reducer group by group. If we wrote each group's result to the output file as soon as it was computed, anything that arrives later could no longer be sorted together with what was already written out.

So instead we cache the results in a TreeMap, let the TreeMap's key ordering do the sorting, and emit everything in cleanup().
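The TreeMap can only sort because FlowBean is comparable. FlowBean was built in the earlier flow-sum steps and is not shown in this section; below is a minimal, hypothetical sketch of what the job assumes (the `sumFlow` field and the descending-by-total-flow ordering are my assumptions, the rest follows the calls made in the job code):

```java
package com.folwsum;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

// Hypothetical sketch of FlowBean: a Hadoop Writable that is also
// Comparable, so a TreeMap keyed by it stays sorted.
public class FlowBean implements WritableComparable<FlowBean> {
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public long getUpFlow() { return upFlow; }
    public long getDownFlow() { return downFlow; }

    // order by total flow, largest first (assumed ordering)
    @Override
    public int compareTo(FlowBean o) {
        return Long.compare(o.sumFlow, this.sumFlow);
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // what TextOutputFormat writes for the value column
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
```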

The code change is mainly in the class that extends Reducer:

```java
package com.folwsum;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;

import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class OneStepFlowSumSort {

    public static class OneStepFlowSumMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

        Text k = new Text();
        FlowBean v = new FlowBean();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // split each input line into fields
            String line = value.toString();
            String[] fields = StringUtils.split(line, ' ');
            // pick out the fields our business logic needs
            String phoneNum = fields[1];
            long upFlow = Long.parseLong(fields[fields.length - 3]);
            long downFlow = Long.parseLong(fields[fields.length - 2]);
            k.set(phoneNum);
            v.set(upFlow, downFlow);
            context.write(k, v);
        }
    }

    public static class OneStepFlowSumReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

        // reduce-side cache: a TreeMap keeps its entries ordered by key,
        // relying on FlowBean's compareTo()
        TreeMap<FlowBean, Text> treeMap = new TreeMap<FlowBean, Text>();

        // the key passed to reduce() is the phone number shared by one group of <phone, bean> pairs,
        // e.g. <phoneA, bean><phoneA, bean> <phoneB, bean><phoneB, bean>;
        // values is an iterator over all the beans of that group
        @Override
        protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
            long upFlowCount = 0;
            long downFlowCount = 0;
            for (FlowBean bean : values) {
                upFlowCount += bean.getUpFlow();
                downFlowCount += bean.getDownFlow();
            }
            FlowBean sumBean = new FlowBean();
            sumBean.set(upFlowCount, downFlowCount);
            // copy the key: the framework reuses the Text object across groups
            Text text = new Text(key.toString());
            // cache instead of writing out, so all groups can be sorted together
            treeMap.put(sumBean, text);
        }

        // emit the final, globally sorted result once every group has been reduced
        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            Set<Map.Entry<FlowBean, Text>> entrySet = treeMap.entrySet();
            for (Map.Entry<FlowBean, Text> ent : entrySet) {
                context.write(ent.getValue(), ent.getKey());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(OneStepFlowSumSort.class);

        // tell the framework which mapper and reducer classes to use
        job.setMapperClass(OneStepFlowSumMapper.class);
        job.setReducerClass(OneStepFlowSumReducer.class);

        // tell the framework the output data types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // tell the framework which input/output components to use;
        // TextInputFormat is the built-in component for reading plain text files
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        // where the input files live
        FileInputFormat.setInputPaths(job, new Path("/Users/jdxia/Desktop/website/hdfs/flowsum/input/"));
        // where the results should be written
        FileOutputFormat.setOutputPath(job, new Path("/Users/jdxia/Desktop/website/hdfs/flowsum/output/"));

        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
```
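Two caveats with this TreeMap approach: java.util.TreeMap keeps a single entry per key, so if two phone numbers produce FlowBeans that compare as equal, the later put() silently replaces the earlier one (keeping a list of Text per FlowBean, or breaking ties in compareTo(), would avoid that); and since every group's result is buffered in the reducer's memory until cleanup(), it only suits data sets whose distinct key count fits comfortably in memory.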