Flink-related POM file

<dependencies>
    <!-- flink-java -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-java</artifactId>
        <version>1.10.1</version>
    </dependency>
    <!-- flink-streaming-java -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-java_2.12</artifactId>
        <version>1.10.1</version>
    </dependency>
    <!-- flink-connector-kafka -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-kafka-0.11_2.12</artifactId>
        <version>1.10.1</version>
    </dependency>
    <!-- flink-connector-redis -->
    <dependency>
        <groupId>org.apache.bahir</groupId>
        <artifactId>flink-connector-redis_2.11</artifactId>
        <version>1.0</version>
    </dependency>
    <!-- flink-connector-elasticsearch -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-elasticsearch6_2.12</artifactId>
        <version>1.10.1</version>
    </dependency>
    <!-- mysql-connector-java -->
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.44</version>
    </dependency>
</dependencies>

Environment

getExecutionEnvironment

Creates an execution environment that represents the context of the current program. If the program is invoked standalone, this method returns a local execution environment; if the program is submitted to a cluster from the command-line client, it returns that cluster's execution environment. In other words, getExecutionEnvironment decides which environment to return based on how the program is run, which makes it the most commonly used way to create an execution environment.

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

If no parallelism is set, the configuration in flink-conf.yaml applies; the default is 1 (parallelism.default: 1).
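For example, a minimal sketch of overriding that default, both job-wide and per operator (the dataStream variable is illustrative):

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Job-wide default; overrides parallelism.default from flink-conf.yaml
env.setParallelism(4);
// Per-operator override; takes precedence over the job-wide setting
dataStream.print().setParallelism(1);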

createLocalEnvironment

Returns a local execution environment; the default parallelism must be specified when calling it.

LocalStreamEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(1);

createRemoteEnvironment

Returns a cluster execution environment, submitting the Jar to a remote server. The JobManager's IP and port must be specified when calling it, along with the Jar package to run on the cluster.

StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("jobmanage-hostname", 6123, "YOURPATH//WordCount.jar");

Source

Reading data from a collection

package com.zh.apitest.source;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.Arrays;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.source
 * filename: SourceTest1_Collection
 * date: 2021/12/1 3:44 PM
 * description: read data from a collection
 */
public class SourceTest1_Collection {
    public static void main(String[] args) throws Exception {
        // Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a collection
        DataStream<SensorReading> dataStreamSource = env.fromCollection(Arrays.asList(
                new SensorReading("sensor_1", 1547718199L, 35.8),
                new SensorReading("sensor_6", 1547718201L, 15.4),
                new SensorReading("sensor_7", 1547718202L, 6.7),
                new SensorReading("sensor_10", 1547718205L, 38.1)
        ));

        // Create a stream directly from individual elements.
        DataStream<Integer> integerDataStreamSource = env.fromElements(1, 2, 4, 57, 100);

        // Print output
        dataStreamSource.print("data");
        integerDataStreamSource.print("int").setParallelism(1);

        // Execute the job
        env.execute("job");
    }
}

Reading data from a file

package com.zh.apitest.source;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.source
 * filename: SourceTest2_File
 * date: 2021/12/1 4:00 PM
 * description: read data from a file
 */
public class SourceTest2_File {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> dataStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // Print output
        dataStream.print();

        env.execute();
    }
}

Using a Kafka message queue as the data source

This requires the Kafka connector dependency.

package com.zh.apitest.source;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import java.util.Properties;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.source
 * filename: SourceTest3_Kafka
 * date: 2021/12/1 4:07 PM
 * description: read data from Kafka
 */
public class SourceTest3_Kafka {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", "consumer-group");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("auto.offset.reset", "latest");

        /* Read data from Kafka.
        Requires this dependency:
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka-0.11_2.12</artifactId>
            <version>1.10.1</version>
        </dependency>
         */
        DataStream<String> dataStream = env.addSource(new FlinkKafkaConsumer011<String>("sensor", new SimpleStringSchema(), properties));

        // Print output
        dataStream.print();

        env.execute();
    }
}

Custom source

Besides the sources above, we can also define a custom source; all that is required is to pass in a SourceFunction.

package com.zh.apitest.source;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

import java.util.HashMap;
import java.util.Random;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.source
 * filename: SourceTest4_UDF
 * date: 2021/12/1 4:21 PM
 * description: custom data source
 */
public class SourceTest4_UDF {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data
        DataStream<SensorReading> dataStream = env.addSource(new MySensorSource());

        // Print output
        dataStream.print();

        env.execute();
    }

    // Implement a custom SourceFunction
    public static class MySensorSource implements SourceFunction<SensorReading> {
        // Flag that controls whether the source keeps running
        private boolean running = true;


        @Override
        public void run(SourceContext<SensorReading> ctx) throws Exception {
            // Create a random number generator
            Random random = new Random();

            // Set initial temperatures for 10 sensors
            HashMap<String, Double> sensorTempMap = new HashMap<>();
            for (int i = 0; i < 10; i++) {
                sensorTempMap.put("sensor_" + (i + 1), 60 + random.nextGaussian() * 20); // [0,120)
            }

            while (running) {
                for (String sensorId : sensorTempMap.keySet()) {
                    // Random fluctuation around the current temperature
                    Double newTemp = sensorTempMap.get(sensorId) + random.nextGaussian();
                    sensorTempMap.put(sensorId, newTemp);
                    ctx.collect(new SensorReading(sensorId, System.currentTimeMillis(), newTemp));
                }
                // Control the emission rate
                Thread.sleep(1000L);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    }
}

Transform

Transformation operators

map, flatMap, filter

  • Transformation operators; data flow: DataStream --> DataStream
  • map: transforms each element into exactly one output element, e.g., map each String to its length
  • flatMap: splits one input element into zero or more output elements
  • filter: keeps only the elements that satisfy a predicate; see the example below

package com.zh.apitest.transform;

import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.transform
 * filename: Transform1_Base
 * date: 2021/12/1 4:44 PM
 * description: basic transformation operators (map, flatMap, filter)
 *              DataStream --> DataStream
 */
public class Transform1_Base {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // 1. map: convert each String to its length
        DataStream<Integer> mapStream = inputStream.map(new MapFunction<String, Integer>() {
            @Override
            public Integer map(String value) throws Exception {
                return value.length();
            }
        });

        // 2. flatMap: split each line into fields by comma
        DataStream<String> flatMapStream = inputStream.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public void flatMap(String value, Collector<String> out) throws Exception {
                String[] fields = value.split(",");
                for (String field : fields) {
                    out.collect(field);
                }
            }
        });

        // 3. filter: keep the records whose id starts with sensor_1
        DataStream<String> filterStream = inputStream.filter(new FilterFunction<String>() {
            @Override
            public boolean filter(String value) throws Exception {
                return value.startsWith("sensor_1");
            }
        });

        // Print output
        mapStream.print("map");
        flatMapStream.print("flatmap");
        filterStream.print("filter");

        env.execute();
    }
}

KeyBy and rolling aggregation operators (Rolling Aggregation)

  • KeyBy: DataStream --> KeyedStream. Logically splits a stream into disjoint partitions, each containing the elements with the same key; internally this is implemented with hashing.
  • Rolling aggregation operators: these operators aggregate each substream of a KeyedStream.
  • Aggregation operators: sum(), min(), max(), minBy(), maxBy()
  • Overall data flow: DataStream -keyBy-> KeyedStream --> DataStream
package com.zh.apitest.transform;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.transform
 * filename: Transform2_RollingAggregation
 * date: 2021/12/1 5:00 PM
 * description: aggregation operators (grouping: keyBy, a data-repartitioning operator; rolling aggregation: max, maxBy, min, minBy, sum)
 *              DataStream -keyBy-> KeyedStream --> DataStream
 */
public class Transform2_RollingAggregation {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // Convert to SensorReading
//        DataStream<SensorReading> dataStream = inputStream.map(new MapFunction<String, SensorReading>() {
//            @Override
//            public SensorReading map(String value) throws Exception {
//                String[] fields = value.split(",");
//                return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
//            }
//        });

        DataStream<SensorReading> dataStream = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
        });

        // Group by key: keyBy()
        KeyedStream<SensorReading, Tuple> keyedStream = dataStream.keyBy("id");

        KeyedStream<SensorReading, String> keyedStream1 = dataStream.keyBy(data -> data.getId()); // Using a KeySelector: the key type is the selector's return type

        // Rolling aggregation, current max temperature: max only updates the aggregated field, while maxBy returns the whole record that holds the max
        SingleOutputStreamOperator<SensorReading> maxResultStream = keyedStream.max("temperature");
        SingleOutputStreamOperator<SensorReading> maxByResultStream = keyedStream.maxBy("temperature");

        maxResultStream.print("max");
        maxByResultStream.print("maxBy");

        env.execute();
    }
}

reduce

  • KeyedStream --> DataStream: an aggregation over a keyed stream that combines the current element with the last aggregated value to produce a new value. The returned stream contains the result of every aggregation step, not just the last one.
  • Data flow: DataStream -keyBy-> KeyedStream --> DataStream
package com.zh.apitest.transform;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.transform
 * filename: Transform3_Reduce
 * date: 2021/12/2 3:38 PM
 * description: data aggregation (reduce)
 *              DataStream -keyBy-> KeyedStream --> DataStream
 */
public class Transform3_Reduce {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // Convert to SensorReading
        DataStream<SensorReading> dataStream = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
        });

        // Group by key: keyBy()
        KeyedStream<SensorReading, Tuple> keyedStream = dataStream.keyBy("id");

        // Reduce aggregation: keep the max temperature seen so far, together with the latest timestamp
        SingleOutputStreamOperator<SensorReading> resultStream = keyedStream.reduce(new ReduceFunction<SensorReading>() {
            @Override
            public SensorReading reduce(SensorReading value1, SensorReading value2) throws Exception {
                return new SensorReading(value1.getId(), value2.getTimestamp(), Math.max(value1.getTemperature(), value2.getTemperature()));
            }
        });

        // ReduceFunction as a lambda
        SingleOutputStreamOperator<SensorReading> resultStream1 = keyedStream.reduce((curState, newData) -> {
            return new SensorReading(curState.getId(), newData.getTimestamp(), Math.max(curState.getTemperature(), newData.getTemperature()));
        });

        resultStream.print();

        env.execute();
    }
}

split, select, connect, CoMap/CoFlatMap, union

  • split: DataStream --> SplitStream: splits one DataStream into two or more SplitStreams according to some criteria.
  • select: SplitStream --> DataStream: retrieves one or more DataStreams from a SplitStream.
  • connect: DataStream, DataStream --> ConnectedStreams: connects two streams while preserving their types. After being connected, the two streams merely live in the same stream; internally each keeps its own data and form unchanged, and the two remain independent.
  • CoMap/CoFlatMap: ConnectedStreams --> DataStream: applied to ConnectedStreams; works like map and flatMap, applying a separate map or flatMap to each of the two streams.
  • union: DataStream --> DataStream: unions two or more DataStreams, producing a new DataStream that contains all their elements.
  • Differences between connect and union:
    • Before a union the streams must have the same type; connect allows different types, which can be unified later in the CoMap.
    • connect can only operate on two streams; union can operate on more.
package com.zh.apitest.transform;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.collector.selector.OutputSelector;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoMapFunction;

import java.util.Collections;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.transform
 * filename: Transform4_MultipleStreams
 * date: 2021/12/2 3:58 PM
 * description: stream splitting: split, select (split tags each record via OutputSelector.select; select retrieves the streams with the given tags); split is deprecated, and the lower-level side-output mechanism can be used instead
 *              stream merging: connect, coMap, coFlatMap
 *              DataStream -split-> SplitStream -select-> DataStream
 *              DataStream -connect-> ConnectedStreams -CoMap/CoFlatMap-> DataStream
 */
public class Transform4_MultipleStreams {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // Convert to SensorReading
        DataStream<SensorReading> dataStream = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
        });

        // 1. Split: divide into two streams at the 30-degree temperature boundary.
        SplitStream<SensorReading> splitStream = dataStream.split(new OutputSelector<SensorReading>() {
            @Override
            public Iterable<String> select(SensorReading value) {
                return value.getTemperature() > 30 ? Collections.singleton("high") : Collections.singleton("low");
            }
        });

        DataStream<SensorReading> highTempStream = splitStream.select("high");
        DataStream<SensorReading> lowTempStream = splitStream.select("low");
        DataStream<SensorReading> allTempStream = splitStream.select("high", "low");

        highTempStream.print("high");
        lowTempStream.print("low");
        allTempStream.print("all");

        // 2. Connect: map the high-temperature stream to tuples, connect it with the low-temperature stream, and emit status info
        DataStream<Tuple2<String, Double>> warningStream = highTempStream.map(new MapFunction<SensorReading, Tuple2<String, Double>>() {
            @Override
            public Tuple2<String, Double> map(SensorReading value) throws Exception {
                return new Tuple2<>(value.getId(), value.getTemperature());
            }
        });

        ConnectedStreams<Tuple2<String, Double>, SensorReading> connectedStreams = warningStream.connect(lowTempStream);

        SingleOutputStreamOperator<Object> resultStream = connectedStreams.map(new CoMapFunction<Tuple2<String, Double>, SensorReading, Object>() {
            @Override
            public Object map1(Tuple2<String, Double> value) throws Exception {
                return new Tuple3<>(value.f0, value.f1, "high temp warning");
            }

            @Override
            public Object map2(SensorReading value) throws Exception {
                return new Tuple2<>(value.getId(), "normal");
            }
        });

        resultStream.print("resultStream");

        // 3. Union of multiple streams
        DataStream<SensorReading> unionResultStream = highTempStream.union(lowTempStream, allTempStream);

        env.execute();
    }
}

Supported data types

Flink streaming applications process event streams represented as data objects, so internally Flink must be able to handle these objects: they need to be serialized and deserialized in order to be shipped over the network, or to be read from the state backend, checkpoints, and savepoints. To do this efficiently, Flink needs to know exactly which data types the application processes. Flink uses the concept of type information (TypeInformation) to represent data types, and generates a dedicated serializer, deserializer, and comparator for each type.
Flink also has a type extraction system that analyzes the input and return types of functions to obtain the type information, and with it the serializers and deserializers, automatically. In some cases, however, such as lambda functions or generic types, the type information must be provided explicitly for the application to work or to perform well (see the returns() sketch in the Lambda Functions section below).
Flink supports all common data types in Java and Scala. The most widely used are the following.

Basic data types

Flink supports all Java and Scala basic data types: Int, Double, Long, String, …

DataStream<Integer> numberStream = env.fromElements(1, 2, 3, 4);
numberStream.map(data -> data * 2);

Java and Scala tuples (Tuples)

In Java, the Tuple types (Tuple2, Tuple3, …) are provided by Flink itself, in the org.apache.flink.api.java.tuple package.
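A minimal sketch of using Tuple2, assuming an existing env (the stream contents are illustrative):

import org.apache.flink.api.java.tuple.Tuple2;

// Tuple fields are accessed positionally as f0, f1, ...
DataStream<Tuple2<String, Integer>> personStream = env.fromElements(
        new Tuple2<>("Adam", 17),
        new Tuple2<>("Sarah", 23));
personStream.filter(p -> p.f1 > 18);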

Scala case classes and Java POJOs

case class Person(name: String, age: Int)
val persons: DataStream[Person] = env.fromElements(
    Person("Adam", 17),
    Person("Sarah", 23) )
persons.filter(p => p.age > 18)
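The Java counterpart is a POJO: a public class with a public no-argument constructor and fields that are public or reachable through getters and setters. A minimal sketch, mirroring the Scala example above:

public class Person {
    public String name;
    public int age;

    // A public no-arg constructor is required for POJO serialization
    public Person() {}

    public Person(String name, int age) {
        this.name = name;
        this.age = age;
    }
}

DataStream<Person> persons = env.fromElements(
        new Person("Adam", 17),
        new Person("Sarah", 23));
persons.filter(p -> p.age > 18);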

Other types (Arrays, Lists, Maps, Enums, etc.)

Flink also supports some special-purpose types in Java and Scala, such as Java's ArrayList, HashMap, Enum, and so on, as sketched below.
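A small sketch, again assuming an existing env; types that Flink cannot fully analyze, such as ArrayList, fall back to generic (Kryo-based) serialization:

import java.util.ArrayList;
import java.util.Arrays;

DataStream<ArrayList<Integer>> listStream = env.fromElements(
        new ArrayList<>(Arrays.asList(1, 2, 3)),
        new ArrayList<>(Arrays.asList(4, 5)));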

Implementing UDF functions

User-defined functions (UDF): finer-grained control over the stream.

Function Classes

Flink exposes interfaces for all UDFs (as interfaces or abstract classes), e.g., MapFunction, FilterFunction, ProcessFunction, and so on.

// Implement the FilterFunction interface
DataStream<String> flinkTweets = tweets.filter(new FlinkFilter());

public static class FlinkFilter implements FilterFunction<String> {
    @Override
    public boolean filter(String value) throws Exception {
        return value.contains("flink");
    }
}


// Implement the function as an anonymous class
DataStream<String> flinkTweets = tweets.filter(new FilterFunction<String>() {
    @Override
    public boolean filter(String value) throws Exception {
        return value.contains("flink");
    }
});

// The string "flink" that we filter on can also be passed in as a parameter
DataStream<String> tweets = env.readTextFile("INPUT_FILE ");

DataStream<String> flinkTweets = tweets.filter(new KeyWordFilter("flink"));

public static class KeyWordFilter implements FilterFunction<String> {
    private String keyWord;

    KeyWordFilter(String keyWord) { this.keyWord = keyWord; }

    @Override
    public boolean filter(String value) throws Exception {
        return value.contains(this.keyWord);
    }
}

Anonymous functions (Lambda Functions)

DataStream<String> tweets = env.readTextFile("INPUT_FILE");

DataStream<String> flinkTweets = tweets.filter(tweet -> tweet.contains("flink"));
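As noted earlier, when a lambda's result type involves generics, Flink's type extraction may fail and the type information must be supplied explicitly. A minimal sketch, reusing the tweets stream above, with the returns() type hint:

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;

// Map each tweet to (tweet, 1); Tuple2's type parameters are erased at
// runtime, so we hand Flink the type explicitly via returns()
DataStream<Tuple2<String, Integer>> tweetCounts = tweets
        .map(tweet -> new Tuple2<>(tweet, 1))
        .returns(Types.TUPLE(Types.STRING, Types.INT));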

Rich Functions

A "rich function" is a function-class interface provided by the DataStream API; every Flink function class has a Rich version. Rich functions differ from regular functions in that they can access the runtime context and have lifecycle methods, which makes more complex functionality possible.

  • RichMapFunction
  • RichFlatMapFunction
  • RichFilterFunction

Rich functions have a notion of lifecycle. Typical lifecycle methods are:

  • open() is the initialization method of a rich function; it is called before an operator such as map or filter is invoked.
  • close() is the last lifecycle method to be called; it does cleanup work.
  • getRuntimeContext() provides information from the function's RuntimeContext, such as the function's parallelism, the task name, and the state.

// Implement a custom rich function class
public static class MyMapper extends RichMapFunction<SensorReading, Tuple2<String, Integer>> {

    @Override
    public Tuple2<String, Integer> map(SensorReading value) throws Exception {
//        getRuntimeContext().getState();
        return new Tuple2<>(value.getId(), getRuntimeContext().getIndexOfThisSubtask());
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        // Initialization work, typically defining state or setting up a database connection
        System.out.println("open");
    }

    @Override
    public void close() throws Exception {
        // Typically close connections and clear state
        System.out.println("close");
    }
}

Sink

Flink has no equivalent of Spark's foreach method that lets users act on each element directly; all output to external systems has to go through a sink. The final output of a job is typically written with something like:
stream.addSink(new MySink(xxxx))
Flink officially provides sinks for a number of frameworks; beyond those, users implement their own sinks, as sketched below.
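A minimal sketch of such a hand-rolled sink (the class name and output are illustrative; the JDBC example below shows the richer RichSinkFunction variant):

import org.apache.flink.streaming.api.functions.sink.SinkFunction;

public static class MySink implements SinkFunction<String> {
    @Override
    public void invoke(String value, Context context) throws Exception {
        // Write the record to the external system here
        System.out.println("sink: " + value);
    }
}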


Kafka

package com.zh.apitest.sink;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;

import java.util.Properties;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.sink
 * filename: SinkTest1_Kafka
 * date: 2021/12/3 4:28 PM
 * description: Kafka as a sink, wired up via DataStream.addSink(new SinkFunction())
 */
public class SinkTest1_Kafka {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", "consumer-group");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("auto.offset.reset", "latest");

        /* Read data from Kafka.
        Requires this dependency:
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka-0.11_2.12</artifactId>
            <version>1.10.1</version>
        </dependency>
         */
        DataStream<String> inputStream = env.addSource(new FlinkKafkaConsumer011<String>("sensor", new SimpleStringSchema(), properties));

        // Convert to SensorReading, then serialize back to String
        DataStream<String> dataStream = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2])).toString();
        });

        dataStream.addSink(new FlinkKafkaProducer011<String>("localhost:9092", "sinktest", new SimpleStringSchema()));

        env.execute();
    }
}

Redis

package com.zh.apitest.sink;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.sink
 * filename: SinkTest2_Redis
 * date: 2021/12/3 5:11 PM
 * description: Redis as a sink
 */
public class SinkTest2_Redis {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // Convert to SensorReading
        DataStream<SensorReading> dataStream = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
        });

        // Define the Jedis connection config
        FlinkJedisPoolConfig config = new FlinkJedisPoolConfig.Builder()
                .setHost("localhost")
                .setPort(6379)
                .build();

        /*
        The bahir connector jar:
        <dependency>
            <groupId>org.apache.bahir</groupId>
            <artifactId>flink-connector-redis_2.11</artifactId>
            <version>1.0</version>
        </dependency> 
        */
        dataStream.addSink(new RedisSink<>(config, new MyRedisMapper()));

        env.execute();
    }

    // Custom RedisMapper
    public static class MyRedisMapper implements RedisMapper<SensorReading> {

        // Define the command used to save data to Redis: store as a hash, i.e. hset sensor_temp id temperature
        @Override
        public RedisCommandDescription getCommandDescription() {
            return new RedisCommandDescription(RedisCommand.HSET, "sensor_temp");
        }

        @Override
        public String getKeyFromData(SensorReading data) {
            return data.getId();
        }

        @Override
        public String getValueFromData(SensorReading data) {
            return data.getTemperature().toString();
        }
    }
}

Elasticsearch

package com.zh.apitest.sink;

import com.zh.apitest.beans.SensorReading;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
import org.apache.http.HttpHost;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.ArrayList;
import java.util.HashMap;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.sink
 * filename: SinkTest3_Elasticsearch
 * date: 2021/12/3 5:30 PM
 * description: Elasticsearch as a sink
 */
public class SinkTest3_Elasticsearch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read data from a file
        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");

        // Convert to SensorReading
        DataStream<SensorReading> dataStream = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
        });

        // Define the Elasticsearch connection config
        ArrayList<HttpHost> httpHosts = new ArrayList<>();
        httpHosts.add(new HttpHost("localhost", 9200));

        /*
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-elasticsearch6_2.12</artifactId>
            <version>1.10.1</version>
        </dependency>
         */
        dataStream.addSink(new ElasticsearchSink.Builder<SensorReading>(httpHosts, new MyEsSinkFunction()).build());

        env.execute();
    }

    // Implement the custom ES write logic
    public static class MyEsSinkFunction implements ElasticsearchSinkFunction<SensorReading> {

        @Override
        public void process(SensorReading element, RuntimeContext ctx, RequestIndexer indexer) {
            // Define the data source to be written
            HashMap<String, String> dataSource = new HashMap<>();
            dataSource.put("id", element.getId());
            dataSource.put("temp", element.getTemperature().toString());
            dataSource.put("ts", element.getTimestamp().toString());

            // Create the request, i.e., the write command sent to ES
            IndexRequest indexRequest = Requests.indexRequest().index("sensor").type("readingdata").source(dataSource);

            // Send the request through the indexer
            indexer.add(indexRequest);
        }
    }
}

Custom JDBC sink

package com.zh.apitest.sink;

import com.zh.apitest.beans.SensorReading;
import com.zh.apitest.source.SourceTest4_UDF;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

/**
 * author: zhanghui
 * project: big-data-learning
 * package: com.zh.apitest.sink
 * filename: SinkTest4_Jdbc
 * date: 2021/12/3 5:49 PM
 * description: custom JDBC sink
 */
public class SinkTest4_Jdbc {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

//        // Read data from a file
//        DataStream<String> inputStream = env.readTextFile("flink-FlinkTutorial/src/main/resources/sensor.txt");
//
//        // Convert to SensorReading
//        DataStream<SensorReading> dataStream = inputStream.map(line -> {
//            String[] fields = line.split(",");
//            return new SensorReading(fields[0], new Long(fields[1]), new Double(fields[2]));
//        });

        DataStream<SensorReading> dataStream = env.addSource(new SourceTest4_UDF.MySensorSource());

        /*
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.44</version>
        </dependency>
         */
        dataStream.addSink(new MyJdbcSink());

        env.execute();
    }

    public static class MyJdbcSink extends RichSinkFunction<SensorReading> {
        // Declare the connection and prepared statements
        Connection connection = null;
        PreparedStatement insertStmt = null;
        PreparedStatement updateStmt = null;

        @Override
        public void open(Configuration parameters) throws Exception {
            connection = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "root", "123456");
            insertStmt = connection.prepareStatement("insert into sensor_temp (id, temp) values (?, ?)");
            updateStmt = connection.prepareStatement("update sensor_temp set temp = ? where id = ?");
        }

        // For each incoming record, use the connection to execute the SQL
        @Override
        public void invoke(SensorReading value, Context context) throws Exception {
            // Try the update first; if nothing was updated, insert instead
            updateStmt.setDouble(1, value.getTemperature());
            updateStmt.setString(2, value.getId());
            updateStmt.execute();
            if (updateStmt.getUpdateCount() == 0) {
                insertStmt.setString(1, value.getId());
                insertStmt.setDouble(2, value.getTemperature());
                insertStmt.execute();
            }
        }

        @Override
        public void close() throws Exception {
            insertStmt.close();
            updateStmt.close();
            connection.close();
        }
    }
}