Prepare the data

```
Order_0000001,pd001,222.8
Order_0000001,pd005,25.8
Order_0000002,pd005,325.8
Order_0000002,pd003,522.8
Order_0000002,pd004,122.4
Order_0000003,pd001,222.8
Order_0000003,pd001,322.8
```


Each record contains an order ID, a product ID, and a transaction amount.
The task is to extract the top-1 and top-N records for each order.

This requires a grouping step:

  1. Use the order ID together with the transaction amount as the key. All order records read in the map stage can then be partitioned by order ID, sorted by amount, and sent to reduce.
  2. On the reduce side, use a GroupingComparator to gather the key-value pairs with the same order ID into one group; the first record of each group is then the maximum.

Overview

Each map task first sorts its own records by order ID, and for equal order IDs by amount.

The reducer then fetches the data from the map side and groups it by key; each key corresponds to one order, so the reducer can simply write the group out.

When the output is produced, the GroupingComparator determines how the records are grouped.
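For the sample data above, the sorted map output arriving at the single reducer would look roughly like this (order ID ascending, amount descending); the GroupingComparator then starts a new group at every change of order ID, so the first record of each group is the per-order maximum:

```
Order_0000001,222.8   <- group 1, first record = max for this order
Order_0000001,25.8
Order_0000002,522.8   <- group 2, first record = max for this order
Order_0000002,325.8
Order_0000002,122.4
Order_0000003,322.8   <- group 3, first record = max for this order
Order_0000003,222.8
```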

Top-1 code

Each order contains several products; find the single record with the highest amount in each order.


OrderBean

```java
package com.top;

import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class OrderBean implements WritableComparable<OrderBean> {

    private Text itemid;
    private DoubleWritable amount;

    public OrderBean() {
    }

    public OrderBean(Text itemid, DoubleWritable amount) {
        set(itemid, amount);
    }

    public void set(Text itemid, DoubleWritable amount) {
        this.itemid = itemid;
        this.amount = amount;
    }

    public Text getItemid() {
        return itemid;
    }

    public DoubleWritable getAmount() {
        return amount;
    }

    @Override
    public int compareTo(OrderBean o) {
        // compare by order id first
        int cmp = this.itemid.compareTo(o.getItemid());
        // if the order ids are equal, compare by amount
        if (cmp == 0) {
            // the minus sign makes the amount sort descending
            cmp = -this.amount.compareTo(o.getAmount());
        }
        return cmp;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(itemid.toString());
        out.writeDouble(amount.get());
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        String readUTF = in.readUTF();
        double readDouble = in.readDouble();
        this.itemid = new Text(readUTF);
        this.amount = new DoubleWritable(readDouble);
    }

    @Override
    public String toString() {
        return "OrderBean{" +
                "itemid=" + itemid +
                ", amount=" + amount +
                '}';
    }
}
```

ItemIdPartitioner

A custom partitioner component.

It guarantees that beans with the same order ID always end up in the same partition.

```java
package com.top;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;

public class ItemIdPartitioner extends Partitioner<OrderBean, NullWritable> {

    @Override
    public int getPartition(OrderBean key, NullWritable nullWritable, int numPartitions) {
        // same idea as the framework's HashPartitioner: beans with the same
        // order id always land in the same partition
        return (key.getItemid().hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
}
```

ItemidGroupingComparator

```java
package com.top;

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class ItemidGroupingComparator extends WritableComparator {

    protected ItemidGroupingComparator() {
        // the super call is required; pass the key class to be compared
        // and true so instances of it are created for comparison
        super(OrderBean.class, true);
    }

    // the framework passes in two key objects, here two OrderBeans
    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        // cast them back to OrderBean
        OrderBean abean = (OrderBean) a;
        OrderBean bbean = (OrderBean) b;
        // if the two beans have the same order id they are treated as equal,
        // so all records of one order are fed to the same reduce() call
        return abean.getItemid().compareTo(bbean.getItemid());
    }
}
```
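As a rough sanity check (not from the original post), the comparator can be exercised directly from a small main method. The class name GroupingComparatorCheck is made up for illustration, and it must live in the com.top package because the comparator's constructor is protected:

```java
package com.top;

import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;

// hypothetical helper, only for a quick local check
public class GroupingComparatorCheck {
    public static void main(String[] args) {
        ItemidGroupingComparator comparator = new ItemidGroupingComparator();

        OrderBean a = new OrderBean(new Text("Order_0000001"), new DoubleWritable(222.8));
        OrderBean b = new OrderBean(new Text("Order_0000001"), new DoubleWritable(25.8));
        OrderBean c = new OrderBean(new Text("Order_0000002"), new DoubleWritable(325.8));

        // same order id -> 0: the framework would feed both to one reduce() call
        System.out.println(comparator.compare(a, b));
        // different order id -> non-zero: a new group starts here
        System.out.println(comparator.compare(a, c));
    }
}
```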

TopOne

```java
package com.top;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;

import java.io.IOException;

public class TopOne {

    public static class TopOneMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable> {

        OrderBean bean = new OrderBean();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            // skip empty lines
            if (line.equals("")) {
                return;
            }
            String[] fields = StringUtils.split(line, ',');
            // key = (order id, amount); the value carries no extra information
            bean.set(new Text(fields[0]), new DoubleWritable(Double.parseDouble(fields[2])));
            context.write(bean, NullWritable.get());
        }
    }

    public static class TopOneReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable> {

        @Override
        protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
            // the first key of each group is the maximum, so write it out directly
            context.write(key, NullWritable.get());
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(TopOne.class);
        job.setMapperClass(TopOneMapper.class);
        job.setReducerClass(TopOneReducer.class);

        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.setInputPaths(job, new Path("/Users/jdxia/Desktop/website/hdfs/index/input"));

        // delete the output directory if it already exists
        Path out = new Path("/Users/jdxia/Desktop/website/hdfs/index/output/");
        FileSystem fileSystem = FileSystem.get(conf);
        if (fileSystem.exists(out)) {
            fileSystem.delete(out, true);
        }

        // tell the framework where to write the results
        FileOutputFormat.setOutputPath(job, out);

        // register the GroupingComparator
        job.setGroupingComparatorClass(ItemidGroupingComparator.class);
        // set the custom partitioner
        job.setPartitionerClass(ItemIdPartitioner.class);
        // use a single reduce task
        job.setNumReduceTasks(1);

        job.waitForCompletion(true);
    }
}
```
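Assuming the default TextOutputFormat and the sample data above, the TopOne job should produce one line per order, formatted by OrderBean.toString(), roughly:

```
OrderBean{itemid=Order_0000001, amount=222.8}
OrderBean{itemid=Order_0000002, amount=522.8}
OrderBean{itemid=Order_0000003, amount=322.8}
```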

Top-N code

Add the following to the bean:

```java
@Override
public boolean equals(Object o) {
    OrderBean bean = (OrderBean) o;
    return bean.getItemid().equals(this.itemid);
}
```
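Since equals is now based only on itemid, pairing it with a matching hashCode keeps the equals/hashCode contract consistent; this override is an optional addition and not part of the original code:

```java
// optional, not in the original post: hashCode consistent with the equals above
@Override
public int hashCode() {
    return this.itemid.hashCode();
}
```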

Modify the driver class:

```java
package com.top;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;

import java.io.IOException;

public class TopN {

    static class TopNMapper extends Mapper<LongWritable, Text, OrderBean, OrderBean> {

        OrderBean v = new OrderBean();
        Text k = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            String[] fields = StringUtils.split(line, ',');
            k.set(fields[0]);
            // the composite bean is used as both key and value
            v.set(new Text(fields[0]), new DoubleWritable(Double.parseDouble(fields[2])));
            context.write(v, v);
        }
    }

    static class TopNReducer extends Reducer<OrderBean, OrderBean, NullWritable, OrderBean> {

        int topn = 1;
        int count = 0;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            Configuration conf = context.getConfiguration();
            // read the value configured in the driver
            topn = Integer.parseInt(conf.get("topn"));
        }

        @Override
        protected void reduce(OrderBean key, Iterable<OrderBean> values, Context context) throws IOException, InterruptedException {
            count = 0;
            for (OrderBean bean : values) {
                // emit at most topn records per group
                if ((count++) == topn) {
                    return;
                }
                context.write(NullWritable.get(), bean);
            }
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        // the value could also come from a config file, e.g.:
        // conf.addResource("userconfig.xml");
        // System.out.println(conf.get("top.n"));
        // here top 2 is hard-coded
        conf.set("topn", "2");

        Job job = Job.getInstance(conf);
        job.setJarByClass(TopN.class);
        job.setMapperClass(TopNMapper.class);
        job.setReducerClass(TopNReducer.class);

        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(OrderBean.class);

        FileInputFormat.setInputPaths(job, new Path("/Users/jdxia/Desktop/website/hdfs/index/input"));

        // delete the output directory if it already exists
        Path out = new Path("/Users/jdxia/Desktop/website/hdfs/index/output/");
        FileSystem fileSystem = FileSystem.get(conf);
        if (fileSystem.exists(out)) {
            fileSystem.delete(out, true);
        }

        // tell the framework where to write the results
        FileOutputFormat.setOutputPath(job, out);

        // register the GroupingComparator used to group keys on the reduce side
        job.setGroupingComparatorClass(ItemidGroupingComparator.class);
        // set the custom partitioner
        job.setPartitionerClass(ItemIdPartitioner.class);
        // use a single reduce task
        job.setNumReduceTasks(1);

        job.waitForCompletion(true);
    }
}
```
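With topn set to 2 and the same sample data, the output should contain roughly the following lines (the reducer writes NullWritable keys, so only the OrderBean values appear, at most two per order):

```
OrderBean{itemid=Order_0000001, amount=222.8}
OrderBean{itemid=Order_0000001, amount=25.8}
OrderBean{itemid=Order_0000002, amount=522.8}
OrderBean{itemid=Order_0000002, amount=325.8}
OrderBean{itemid=Order_0000003, amount=322.8}
OrderBean{itemid=Order_0000003, amount=222.8}
```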

Summary

How many records make the top-N is decided on the reducer side: it outputs exactly as many as it chooses to.
When multiple keys arrive at the reducer, though, how they are grouped is the job of the custom GroupingComparator.

Within each group, the records are ordered according to the sort defined in the custom bean.