1. Integrating Kafka with Spring Boot

Now that we have a basic understanding of Kafka, let's integrate it with Spring Boot and run a small test.

First, add the dependencies we need:

pom.xml

    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>fastjson</artifactId>
        <version>1.2.73</version>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>

Configuration (application.properties):

    spring.application.name=springboot-kafka
    server.port=8082
    #============== kafka ==============#
    spring.kafka.bootstrap-servers=localhost:9092
    #============== producer ===========#
    spring.kafka.producer.retries=0
    # Upper bound, in bytes, for each batch of records sent in one request
    spring.kafka.producer.batch-size=16384
    spring.kafka.producer.buffer-memory=33554432
    # Serializers for the record key and value
    spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
    spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
    #============== consumer ===========#
    # Default consumer group id
    spring.kafka.consumer.group-id=test-log-group
    spring.kafka.consumer.auto-offset-reset=earliest
    spring.kafka.consumer.enable-auto-commit=true
    spring.kafka.consumer.auto-commit-interval=100
    # Deserializers for the record key and value
    spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
    spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

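Incidentally: with the StringSerializer/StringDeserializer pair above, we convert objects to and from JSON ourselves with fastjson (see UserProducer below). Spring Kafka can also handle the JSON conversion for you. A minimal sketch of the alternative properties, assuming the entity classes live in com.zym.kafka.entity as in this project:

    spring.kafka.producer.value-serializer=org.springframework.kafka.support.serializer.JsonSerializer
    spring.kafka.consumer.value-deserializer=org.springframework.kafka.support.serializer.JsonDeserializer
    # JsonDeserializer only instantiates classes from trusted packages
    spring.kafka.consumer.properties.spring.json.trusted.packages=com.zym.kafka.entity
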
User.java

    package com.zym.kafka.entity;

    import lombok.Data;
    import lombok.experimental.Accessors;

    @Data
    @Accessors(chain = true) // setters return this, so calls can be chained
    public class User {
        private String username;
        private String userid;
        private String state;
    }

UserProducer.java

    package com.zym.kafka.producer;

    import com.alibaba.fastjson.JSON;
    import com.zym.kafka.entity.User;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.kafka.core.KafkaTemplate;
    import org.springframework.stereotype.Component;

    @Component
    public class UserProducer {

        @Autowired
        private KafkaTemplate<String, String> kafkaTemplate;

        // Build a demo User and send it to the "test" topic as a JSON string.
        public void sendMessage(String userId) {
            User user = new User();
            user.setUsername("zym").setUserid(userId).setState("normal");
            System.out.println(user.toString());
            kafkaTemplate.send("test", JSON.toJSONString(user));
        }
    }
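
Note that kafkaTemplate.send() is asynchronous and returns immediately. If you want to confirm that each record actually reached the broker, you can register a callback on the returned future. A minimal sketch, assuming Spring Kafka 2.x (where send() returns a ListenableFuture); the method name sendMessageWithCallback is my own:

    import org.springframework.kafka.support.SendResult;
    import org.springframework.util.concurrent.ListenableFuture;

    // A variant of sendMessage that logs the broker-assigned partition/offset, or the failure.
    public void sendMessageWithCallback(String userId) {
        User user = new User();
        user.setUsername("zym").setUserid(userId).setState("normal");
        ListenableFuture<SendResult<String, String>> future =
                kafkaTemplate.send("test", JSON.toJSONString(user));
        future.addCallback(
                result -> System.out.println("sent, partition=" + result.getRecordMetadata().partition()
                        + ", offset=" + result.getRecordMetadata().offset()),
                ex -> System.err.println("send failed: " + ex.getMessage()));
    }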

UserConsumer.java

    package com.zym.kafka.consumer;

    import lombok.extern.slf4j.Slf4j;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.kafka.annotation.KafkaListener;
    import org.springframework.stereotype.Component;

    import java.util.Optional;

    @Component
    @Slf4j
    public class UserConsumer {

        // Invoked for every record that arrives on the "test" topic.
        @KafkaListener(topics = {"test"})
        public void consumer(ConsumerRecord<String, String> consumerRecord) {
            Optional<String> kafkaMessage = Optional.ofNullable(consumerRecord.value());
            log.info(">>>>>>>>record<<<<<<<< {}", kafkaMessage);
            if (kafkaMessage.isPresent()) {
                String message = kafkaMessage.get();
                System.out.println("consumed message: " + message);
            }
        }
    }
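
Since the payload is the JSON string produced above, a listener can also parse it straight back into a User with fastjson. A minimal sketch (this class is an illustration, not part of the original project; it uses its own group id so it receives messages independently of UserConsumer):

    package com.zym.kafka.consumer;

    import com.alibaba.fastjson.JSON;
    import com.zym.kafka.entity.User;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.kafka.annotation.KafkaListener;
    import org.springframework.stereotype.Component;

    @Component
    public class TypedUserConsumer {

        // Parse the JSON payload back into a User object.
        @KafkaListener(topics = {"test"}, groupId = "test-log-group-typed")
        public void consume(ConsumerRecord<String, String> record) {
            User user = JSON.parseObject(record.value(), User.class);
            System.out.println("consumed user: " + user.getUserid() + ", state=" + user.getState());
        }
    }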

Next, modify the application class so that it produces ten messages to Kafka once the beans have been wired up (alternatively you could expose a test endpoint — see the sketch after the code below; I took the lazy route here).

SpringbootKafkaApplication.java

    package com.zym.kafka;

    import com.zym.kafka.producer.UserProducer;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.boot.SpringApplication;
    import org.springframework.boot.autoconfigure.SpringBootApplication;

    import javax.annotation.PostConstruct;

    @SpringBootApplication
    public class SpringbootKafkaApplication {

        @Autowired
        private UserProducer userProducer;

        // Runs once the bean is constructed: produce ten messages to Kafka.
        @PostConstruct
        public void init() {
            for (int i = 0; i < 10; i++) {
                userProducer.sendMessage(String.valueOf(i));
            }
        }

        public static void main(String[] args) {
            SpringApplication.run(SpringbootKafkaApplication.class, args);
        }
    }
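
For reference, here is what the test endpoint mentioned above could look like. A minimal sketch using the spring-boot-starter-web dependency already in the pom (the class name and path are my own):

    package com.zym.kafka.controller;

    import com.zym.kafka.producer.UserProducer;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.web.bind.annotation.GetMapping;
    import org.springframework.web.bind.annotation.PathVariable;
    import org.springframework.web.bind.annotation.RestController;

    @RestController
    public class KafkaTestController {

        @Autowired
        private UserProducer userProducer;

        // GET /send/{userId} produces a single message to the "test" topic.
        @GetMapping("/send/{userId}")
        public String send(@PathVariable String userId) {
            userProducer.sendMessage(userId);
            return "sent " + userId;
        }
    }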

Now start the project and check whether the integration works.

Check the console:

(screenshot 1.png: producer console output)

Because the console output is interleaved, my screenshot only caught the last nine messages, but there were in fact ten — so ten messages were produced to Kafka. Next, let's look for the consumer's log statements:

(screenshot 2.png: consumer console output)

As we can see, the messages have been consumed as well.

2. Problems encountered:

1. The console prints: Connection to node 1 (localhost/127.0.0.1:9092) could not be established.

Cause: when Kafka starts, it registers its listener endpoint, including the IP and port, under /brokers/ids in ZooKeeper; a connecting client retrieves that IP and port from there.
Looking at my Kafka configuration, I had overlooked the difference between listeners and advertised.listeners: advertised.listeners is the connection address actually exposed to external clients, and it is what gets written to the ZooKeeper node. Once I put the server's public IP into advertised.listeners, the problem was solved.

Solution: edit the following settings in Kafka's server.properties:

    # Accept connections on all interfaces
    listeners=PLAINTEXT://0.0.0.0:9092
    # Address advertised to external clients
    advertised.listeners=PLAINTEXT://xx.xx.xx.xx:9092
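
To verify that the broker is reachable from the client machine after this change, one quick check is to list topics with Kafka's AdminClient (a standalone sketch; xx.xx.xx.xx stands for your broker's advertised address, as above):

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;

    import java.util.Properties;

    public class ConnectivityCheck {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "xx.xx.xx.xx:9092");
            // listTopics() fails with a timeout if the broker cannot be reached
            try (AdminClient admin = AdminClient.create(props)) {
                System.out.println("topics: " + admin.listTopics().names().get());
            }
        }
    }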

3. Further study

Compared with the commonly used RabbitMQ, basic usage is much the same. To understand how the two really differ, I need to dig deeper into Kafka, which I will cover in upcoming posts.

Reference: https://blog.csdn.net/qq_18603599/article/details/81169488