Kafka整合SpringBoot
前文 Kafka客户端详解
引入依赖
<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>8</maven.compiler.source>
    <maven.compiler.target>8</maven.compiler.target>
    <spring-boot.version>2.3.12.RELEASE</spring-boot.version>
    <fastjson.version>2.0.51</fastjson.version>
    <!--我服务器安装的kafka版本是3.4.0 所以最好和安装版本对应-->
    <kafka.version>3.4.0</kafka.version>
</properties>
<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-parent</artifactId>
            <version>${spring-boot.version}</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>${kafka.version}</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba.fastjson2</groupId>
            <artifactId>fastjson2</artifactId>
            <version>${fastjson.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
            <!-- NOTE(review): spring-kafka is versioned independently of Spring Boot;
                 Boot 2.3.12.RELEASE's BOM manages spring-kafka 2.5.14.RELEASE. Pinning
                 it to ${spring-boot.version} looks wrong — verify, or drop this
                 <version> and let the imported Boot BOM pick the version. -->
            <version>${spring-boot.version}</version>
        </dependency>
    </dependencies>
</dependencyManagement>
<dependencies>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba.fastjson2</groupId>
        <artifactId>fastjson2</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <!--kafka整合SpringBoot-->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.11</version>
        <scope>test</scope>
    </dependency>
</dependencies>
yml配置
spring:
  kafka:
    # 服务地址
    bootstrap-servers: 192.168.75.61:9092,192.168.75.62:9092,192.168.75.63:9092
    # 生产者相关配置
    producer:
      # 应答级别:多少个分区副本备份完成时向生产者发送ack确认(可选0、1、all/-1)
      acks: 1
      # 重试次数
      retries: 5
      # 默认批处理大小,ProducerBatch大小
      batch-size: 16384
      # 生产端缓冲区大小
      buffer-memory: 33554432
      # 发送消息的key - value 序列化类
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # 消息发送 最大等待时长
      properties:
        linger:
          ms: 0
    # 消费端配置
    consumer:
      # 是否开启自动提交
      enable-auto-commit: true
      # 提交offset延时(接收到消息后多久提交offset)
      auto-commit-interval: 1000
      # 当kafka中没有初始offset或offset超出范围时将自动重置offset
      # earliest:重置为分区中最小的offset;
      # latest:重置为分区中最新的offset(消费分区中新产生的数据);
      # none:只要有一个分区不存在已提交的offset,就抛出异常;
      auto-offset-reset: latest
      # 接收消息的key - value 反序列化类
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      properties:
        # 默认的消费组ID
        group:
          id: defaultConsumerGroup
        # 消费会话超时时间(超过这个时间consumer没有发送心跳,就会触发rebalance操作)
        session:
          timeout:
            ms: 120000
        # 消费请求超时时间
        request:
          timeout:
            ms: 180000
消息生产者
package com.hs.kfk.boot;import com.alibaba.fastjson2.JSON;
import com.hs.kfk.serializer.User;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;/*** @Description: kafka整合SpringBoot,消息生产者* @Author 胡尚* @Date: 2024/8/8 17:03*/
@RestController()
public class BootProducer {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Serializes a sample {@link User} to JSON and publishes it to the
     * {@code disTopic} topic. The record key is the fixed string {@code "key"},
     * so every message hashes to the same partition (visible in the sample
     * output: all records land in partition 1).
     */
    @GetMapping("/send")
    public void sendMessage() {
        User user = new User(1L, "hushang", 24);
        // JSON.toJSONString(...) serializes directly to a JSON string;
        // the previous JSON.toJSON(user).toString() built an intermediate
        // JSONObject only to immediately stringify it.
        String message = JSON.toJSONString(user);
        kafkaTemplate.send("disTopic", "key", message);
    }
}
消息消费者
package com.hs.kfk.boot;import com.alibaba.fastjson2.JSON;
import com.hs.kfk.serializer.User;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;/*** @Description: kafka整合SpringBoot 消息消费者* @Author 胡尚* @Date: 2024/8/8 17:09*/
@Component
public class BootConsumer {

    /**
     * Listens on the {@code disTopic} topic (consumer group comes from the
     * application YAML), prints the record's coordinates and payload, then
     * deserializes the JSON payload back into a {@link User} and prints it.
     *
     * @param record the consumed key/value record delivered by Spring Kafka
     */
    @KafkaListener(topics = {"disTopic"})
    public void consumerMessage(ConsumerRecord<String, String> record) {
        String message = record.value();
        // One line per record: topic / partition / offset / key / raw JSON value.
        System.out.println("topic:" + record.topic()
                + "\tpartition:" + record.partition()
                + "\toffset: " + record.offset()
                + "\tkey: " + record.key()
                + "\tmessage: " + message);
        // Round-trip check: parse the JSON back into the domain object.
        System.out.println(JSON.parseObject(message, User.class));
    }
}
输出结果
topic:disTopic partition:1 offset: 8 key: key message: {"age":24,"uId":1,"username":"hushang"}
User{uId=1, username='hushang', age=24}
topic:disTopic partition:1 offset: 9 key: key message: {"age":24,"uId":1,"username":"hushang"}
User{uId=1, username='hushang', age=24}
topic:disTopic partition:1 offset: 10 key: key message: {"age":24,"uId":1,"username":"hushang"}
User{uId=1, username='hushang', age=24}