Preparation
1. Under /home, create a file named words.txt with the following content:
hello tom
hello jerry
hello kitty
hello world
hello tom
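One way to create it in a single step (a shell sketch using a heredoc):

cat > /home/words.txt <<'EOF'
hello tom
hello jerry
hello kitty
hello world
hello tom
EOF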
2. Upload words.txt to HDFS: hdfs dfs -put words.txt /tangwx
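If the /tangwx directory does not exist yet, create it first, then confirm the upload landed:

hdfs dfs -mkdir -p /tangwx   # otherwise -put would create a file named /tangwx
hdfs dfs -ls /tangwx         # confirm words.txt is there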
Coding
pom.xml
Create an ordinary Maven project and add the dependency:
<dependencies>
    <!-- transitively pulls in hadoop-common and hadoop-hdfs -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0</version>
    </dependency>
</dependencies>
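For reference, a minimal pom.xml skeleton around that dependency. The artifactId and version are inferred from the jar name used later (word-count-demo-1.0-SNAPSHOT.jar); the groupId is an assumption:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <!-- groupId is an assumption; artifactId/version match the jar built below -->
    <groupId>com.twx</groupId>
    <artifactId>word-count-demo</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <!-- transitively pulls in hadoop-common and hadoop-hdfs -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0</version>
        </dependency>
    </dependencies>
</project>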
Mapper
Mapper code:
package com.twx.bigdata;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author tangwx@soyuan.com.cn
 * @date 2020/3/9 11:45
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final Text k = new Text();
    private final IntWritable v = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Read the line
        String line = value.toString();
        // 2. Split it into words
        String[] words = line.split(" ");
        // 3. Emit a (word, 1) pair for each word
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}
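To sanity-check the mapper locally, a minimal MRUnit sketch (assumes org.apache.mrunit:mrunit:1.1.0 with classifier hadoop2 and JUnit 4 added as test dependencies; the test class itself is hypothetical):

package com.twx.bigdata;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.junit.Test;

public class WordCountMapperTest {

    @Test
    public void mapEmitsOnePairPerWord() throws Exception {
        // The input line "hello tom" should produce (hello,1) and (tom,1)
        MapDriver.newMapDriver(new WordCountMapper())
                .withInput(new LongWritable(0), new Text("hello tom"))
                .withOutput(new Text("hello"), new IntWritable(1))
                .withOutput(new Text("tom"), new IntWritable(1))
                .runTest();
    }
}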
Reducer
package com.twx.bigdata;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @author tangwx@soyuan.com.cn
 * @date 2020/3/9 11:47
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private final IntWritable res = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the 1s emitted by the mapper for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        res.set(sum);
        context.write(key, res);
    }
}
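The matching reducer check, under the same MRUnit assumption:

package com.twx.bigdata;

import java.util.Arrays;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.junit.Test;

public class WordCountReducerTest {

    @Test
    public void reduceSumsTheCounts() throws Exception {
        // The key "hello" with values [1, 1] should reduce to (hello, 2)
        ReduceDriver.newReduceDriver(new WordCountReducer())
                .withInput(new Text("hello"), Arrays.asList(new IntWritable(1), new IntWritable(1)))
                .withOutput(new Text("hello"), new IntWritable(2))
                .runTest();
    }
}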
Driver
package com.twx.bigdata;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Configures and submits the word-count job.
 */
public class WordCountDriver {

    public static void main(String[] args) throws Exception {
        // 1. Load the configuration and create the job
        Configuration configuration = new Configuration();
        // configuration.set("dfs.client.use.datanode.hostname", "true");
        Job job = Job.getInstance(configuration);

        // 2. Locate the jar via this driver class
        job.setJarByClass(WordCountDriver.class);

        // 3. Set the mapper and reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 5. Set the reducer output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6. Take the input and output paths from the command line
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
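One optional tweak not in the original driver: because WordCountReducer reads and writes the same (Text, IntWritable) types, it can double as a combiner, pre-summing counts on the map side to shrink shuffle traffic. A single line before submission enables it:

// Optional: reuse the reducer as a combiner to pre-aggregate (word, 1) pairs map-side
job.setCombinerClass(WordCountReducer.class);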
Package the project: mvn clean package -DskipTests
Upload the generated jar to the /home directory of any Hadoop node; since the hadoop jar command puts the cluster's Hadoop classes on the classpath, the plain project jar is sufficient.
Execution
From the /home directory, run the MapReduce program (the paths below match the upload above; the output directory must not exist yet):
hadoop jar word-count-demo-1.0-SNAPSHOT.jar com.twx.bigdata.WordCountDriver /tangwx/words.txt /tangwx/wordResult
Inspect the generated files:
hdfs dfs -ls /tangwx/wordResult
hdfs dfs -cat /tangwx/wordResult/part-r-00000
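For the sample words.txt above, part-r-00000 should contain one tab-separated line per word, sorted by key:

hello	5
jerry	1
kitty	1
tom	2
world	1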