Requirements
Filter each line of the input log and check whether it contains "java":
- Website addresses that contain "java" are written to e:/java.log
- Website addresses that do not contain "java" are written to e:/other.log
Input data: log.txt
java.org
jdxia
java.com
x.com
java
Expected output: java.log and other.log
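Applying the filter rule to the sample input above, the two files would be expected to contain:
java.log:
java.org
java.com
java
other.log:
jdxia
x.com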
OutputFormat interface implementation classes
OutputFormat is the base class for MapReduce output; every MapReduce output implementation implements the OutputFormat interface.
Common OutputFormat implementation classes:
- Text output: TextOutputFormat
The default output format is TextOutputFormat, which writes each record as a line of text. Its keys and values can be of any type, because TextOutputFormat calls toString() to convert them to strings.
- SequenceFileOutputFormat
SequenceFileOutputFormat writes its output as a sequence file. If the output will be consumed as the input of a subsequent MapReduce job, this is a good output format, because it is compact and compresses well (a configuration sketch follows this list).
- Custom OutputFormat
Implement the output yourself according to the application's requirements.
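For reference, a minimal sketch (not part of the original example) of how a driver can switch to the built-in SequenceFileOutputFormat with block compression. The helper class name here is made up for illustration; the Job, FileOutputFormat and SequenceFileOutputFormat calls are the standard org.apache.hadoop.mapreduce.lib.output API.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
// Hypothetical helper: configures a job to write block-compressed sequence files
public class SequenceOutputConfig {
    public static void configure(Job job, Path outputDir) {
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        // Optional: enable block compression for the sequence file output
        FileOutputFormat.setCompressOutput(job, true);
        SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
        FileOutputFormat.setOutputPath(job, outputDir);
    }
}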
Code
Steps to define a custom OutputFormat:
- Define a class that extends FileOutputFormat
- Provide a custom RecordWriter, overriding write(), the method that actually writes the output data
The custom OutputFormat
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable> {
@Override
public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
//Create and return the custom RecordWriter
return new FilterRecordWriter(job);
}
}
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import java.io.IOException;
public class FilterRecordWriter extends RecordWriter<Text, NullWritable> {
FSDataOutputStream javaOut = null;
FSDataOutputStream otherOut = null;
public FilterRecordWriter(TaskAttemptContext job) {
//1. Get the file system
FileSystem fs;
try {
fs = FileSystem.get(job.getConfiguration());
//2. Create the output file paths
Path javaPath = new Path("/Users/jdxia/Desktop/website/data/java.log");
Path otherPath = new Path("/Users/jdxia/Desktop/website/data/other.log");
//3. Create the output streams
javaOut = fs.create(javaPath);
otherOut = fs.create(otherPath);
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void write(Text key, NullWritable value) throws IOException, InterruptedException {
//Write to a different file depending on whether the key contains "java"
//(the Reducer already appends a line separator to each key)
if (key.toString().contains("java")) {
javaOut.write(key.toString().getBytes());
} else {
otherOut.write(key.toString().getBytes());
}
}
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
//Close the resources; if the streams are not closed, the output files stay empty
if (javaOut != null) {
javaOut.close();
}
if (otherOut != null) {
otherOut.close();
}
}
}
Mapper class
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
Text k = new Text();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//Get one line
String line = value.toString();
k.set(line);
//Emit the line as the key
context.write(k, NullWritable.get());
}
}
Reducer class
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
@Override
protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
//Append a line separator here, because the custom RecordWriter writes the key bytes as-is
String k = key.toString();
k += "\r\n";
context.write(new Text(k), NullWritable.get());
}
}
Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FilterDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(FilterDriver.class);
job.setMapperClass(FilterMapper.class);
job.setReducerClass(FilterReducer.class);
//Input and output format components
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(FilterOutputFormat.class);
//Map output types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(NullWritable.class);
//Reduce output types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
//Tell the framework which path the input data files are under
FileInputFormat.setInputPaths(job, new Path("/Users/jdxia/Desktop/website/data/input/"));
//Delete the output directory if it already exists
Path out = new Path("/Users/jdxia/Desktop/website/data/output/");
FileSystem fileSystem = FileSystem.get(conf);
if (fileSystem.exists(out)) {
fileSystem.delete(out, true);
}
//Tell the framework where to write the result.
//Even though we use a custom OutputFormat, it extends FileOutputFormat,
//and FileOutputFormat still writes a _SUCCESS marker file, so an output directory must be set.
FileOutputFormat.setOutputPath(job, out);
boolean res = job.waitForCompletion(true);
System.exit(res ? 0 : 1);
}
}
Note
When defining a custom OutputFormat, the close() method of the RecordWriter must close the stream resources; otherwise the content of the output files will be empty.
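As a small alternative sketch (not the original code), the same cleanup in FilterRecordWriter.close() can be written with Hadoop's IOUtils.closeStream(), which is null-safe and ignores close failures:
import org.apache.hadoop.io.IOUtils;
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
    //closeStream is null-safe, so both streams are released
    //even if the constructor failed before opening one of them
    IOUtils.closeStream(javaOut);
    IOUtils.closeStream(otherOut);
}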