Configuration file
    flume_kafka.conf

    # Name the components of this agent
    a1.sources = r1
    a1.sinks = k1
    a1.channels = c1

    # Exec source: tail the file continuously and send any new lines to the channel
    a1.sources.r1.type = exec
    a1.sources.r1.command = tail -F /root/data/hbreal/hbreal_2019071422_2019071423.txt

    # Kafka sink (a logger sink can be used instead when debugging)
    #a1.sinks.k1.type = logger
    a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
    a1.sinks.k1.kafka.topic = hbreal
    a1.sinks.k1.kafka.bootstrap.servers = s2:9092
    a1.sinks.k1.kafka.flumeBatchSize = 20
    a1.sinks.k1.kafka.producer.acks = 1

    # Memory channel wiring the source to the sink
    a1.channels.c1.type = memory
    a1.sources.r1.channels = c1
    a1.sinks.k1.channel = c1
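
    The memory channel above relies on Flume's default sizing. If the tailed file grows faster than Kafka accepts events, the channel can overflow; the sketch below raises its limits using the standard memory channel properties (the values are only illustrative and not part of the original config).

    a1.channels.c1.capacity = 10000
    a1.channels.c1.transactionCapacity = 1000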

    Start Kafka

    kafka-server-start.sh -daemon /soft/kafka/config/server.properties
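
    Because -daemon sends the broker to the background, startup errors are easy to miss. A quick sanity check is to look for the Kafka process and the tail of the server log (the log path assumes the default logs directory under the /soft/kafka install):

    jps | grep Kafka
    tail -n 20 /soft/kafka/logs/server.log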

    Create the topic

    kafka-topics.sh --create --zookeeper s3:2181 --replication-factor 1 --partitions 3 --topic hbreal
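
    To confirm the topic exists with the expected partition and replication settings, describe it with the same tool:

    kafka-topics.sh --describe --zookeeper s3:2181 --topic hbreal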

    Console producer (for manual testing)

    kafka-console-producer.sh --broker-list s2:9092 --topic hbreal

    Console consumer

    kafka-console-consumer.sh --bootstrap-server s2:9092 --topic hbreal
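
    The console consumer only prints messages produced after it starts; to replay everything already on the topic (for example events Flume delivered before the consumer was opened), add --from-beginning:

    kafka-console-consumer.sh --bootstrap-server s2:9092 --topic hbreal --from-beginning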

    Start the Flume collection agent

    flume-ng agent -f ./flume_kafka.conf -n a1 -Dflume.root.logger=INFO,console
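
    With the agent and the console consumer both running, an end-to-end smoke test is to append a line to the tailed file and watch it show up on the hbreal topic within a second or two (the echoed text is arbitrary):

    echo "flume-kafka-test $(date)" >> /root/data/hbreal/hbreal_2019071422_2019071423.txt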