Kafka的Java客户端

导读:本篇文章讲解 Kafka的Java客户端,希望对大家有帮助,欢迎收藏,转发!站点地址:www.bmabk.com

Kafka的Java客户端-demo版

0. 订单类

package com.hao.entity;

public class Order {

    // Unique identifier of the order; boxed so it can be absent before assignment.
    private Long orderId;
    // Number of items in the order.
    private int count;

    /** No-arg constructor (required by JSON frameworks such as fastjson). */
    public Order() {
    }

    /**
     * Creates a fully initialized order.
     *
     * @param orderId unique order identifier
     * @param count   number of items in the order
     */
    public Order(Long orderId, int count) {
        this.orderId = orderId;
        this.count = count;
    }

    /** @return the unique order identifier, may be {@code null} if unset */
    public Long getOrderId() {
        return orderId;
    }

    public void setOrderId(Long orderId) {
        this.orderId = orderId;
    }

    /** @return the number of items in the order */
    public int getCount() {
        return count;
    }

    public void setCount(int count) {
        this.count = count;
    }
}


1. 依赖

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <!-- Aligned with the source packages (com.hao.*); was "com.aho" (typo). -->
    <groupId>com.hao</groupId>
    <artifactId>kafka-demo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>

        <!-- JSON serialization of the Order payload.
             SECURITY: 1.2.47 is vulnerable to well-known autoType deserialization
             RCE exploits; 1.2.83 is the patched release of the 1.2.x line.
             Never ship 1.2.47. -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.83</version>
        </dependency>

        <!-- Kafka producer/consumer client API. -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.4.1</version>
        </dependency>

    </dependencies>

</project>

2. 生产者发送消息

2.1 同步发送

package com.hao.kafka;

import com.alibaba.fastjson.JSON;
import com.hao.entity.Order;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class MyProducerDemo01 {
    // Topic the demo publishes to.
    private static final String TOPIC_NAME = "my-replicated-topic";

    /**
     * Sends a single order message synchronously and prints where it landed.
     * {@code send(...).get()} blocks until the broker acknowledges the record.
     *
     * @throws ExecutionException   if the send fails on the broker side
     * @throws InterruptedException if the blocking get() is interrupted
     */
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        // Kafka cluster bootstrap servers.
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "192.168.100.108:9092,192.168.100.108:9093,192.168.100.108:9094");
        // Key/value serializers.
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // BUGFIX: acks=1 (leader ack) instead of acks=0. With acks=0 the broker
        // returns no record metadata, so the offset printed below would always
        // be -1 and this "synchronous send" demo would show nothing useful.
        properties.put(ProducerConfig.ACKS_CONFIG, "1");

        // BUGFIX: try-with-resources — the original never closed the producer,
        // leaking it and risking loss of buffered records on JVM exit.
        try (Producer<String, String> producer = new KafkaProducer<String, String>(properties)) {
            Order order = new Order(1001L, 2);
            // Key = orderId so all records of one order hash to the same partition.
            ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(TOPIC_NAME,
                    String.valueOf(order.getOrderId()),
                    JSON.toJSONString(order));

            // Blocks until the broker acknowledges, then reports the placement.
            RecordMetadata metadata = producer.send(producerRecord).get();
            System.out.println("同步方式发送消息结果:" + "topic-" + metadata.topic() + "|partition-"
                        + metadata.partition() + "|offset-" + metadata.offset());
        }
    }

}

(原文此处为同步发送的运行结果截图,转载时图片缺失)

2.2 异步发送

package com.hao.kafka;

import com.alibaba.fastjson.JSON;
import com.hao.entity.Order;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class MyProducer02 {
    // Topic the demo publishes to.
    private static final String TOPIC_NAME = "my-replicated-topic";

    /**
     * Sends five order messages asynchronously; a {@link CountDownLatch}
     * blocks main until every send callback has fired, then the producer
     * is closed.
     *
     * @throws InterruptedException if the latch wait is interrupted
     */
    public static void main(String[] args) throws InterruptedException {
        Properties properties = new Properties();
        // Kafka cluster bootstrap servers.
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "192.168.100.108:9092,192.168.100.108:9093,192.168.100.108:9094");
        // Key/value serializers.
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // acks=0: fire-and-forget (no broker acknowledgement awaited).
        properties.put(ProducerConfig.ACKS_CONFIG, "0");

        Producer<String, String> producer = new KafkaProducer<String, String>(properties);
        // One count per message; await() below waits until all callbacks ran.
        CountDownLatch countDownLatch = new CountDownLatch(5);
        for (int i = 0; i < 5; i++) {
            Order order = new Order((long) i, 2);
            // Key = orderId so all records of one order hash to the same partition.
            ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(TOPIC_NAME,
                    String.valueOf(order.getOrderId()),
                    JSON.toJSONString(order));
            // Asynchronous send with a completion callback.
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // BUGFIX: the original concatenated e.getStackTrace(),
                        // which prints the array reference
                        // ("[Ljava.lang.StackTraceElement;@...") instead of
                        // any useful diagnostic.
                        System.err.println("发送消息失败:" + e.getMessage());
                        e.printStackTrace();
                    }
                    if (recordMetadata != null) {
                        System.out.println("异步方式发送消息结果:" + "topic-" + recordMetadata.topic() + "|partition-"
                                + recordMetadata.partition() + "|offset-" + recordMetadata.offset());
                    }
                    // Count down on success AND failure so await() cannot hang.
                    countDownLatch.countDown();
                }
            });
        }
        // Block until the latch reaches zero (all five callbacks completed).
        countDownLatch.await();
        producer.close();
    }

}

(原文此处为异步发送的运行结果截图,转载时图片缺失)

3. 消费者消费消息

package com.hao.kafka;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;

public class MyConsumer01 {
    // Topic to consume from.
    private static final String TOPIC_NAME = "my-replicated-topic";
    // Consumer group id; committed offsets are tracked per group.
    private static final String GROUP_NAME = "testGroup999";

    /**
     * Polls the topic in an endless loop, prints each record, and manually
     * commits offsets (synchronously) after every non-empty batch.
     */
    public static void main(String[] args) {
        // 1. configuration
        Properties properties = new Properties();
        // Kafka cluster bootstrap servers.
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.100.108:9092,192.168.100.108:9093,192.168.100.108:9094");
        // Key/value deserializers (must mirror the producer's serializers).
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Consumer group id.
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, GROUP_NAME);

        /*
        // Auto-commit variant (commit offsets automatically; default is true):
        properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Interval between automatic offset commits.
        // BUGFIX: the original example used MAX_POLL_INTERVAL_MS_CONFIG here,
        // which is the poll-liveness timeout, not the auto-commit interval.
        properties.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
         */

        /*
        What to do when the group has no valid committed offset (brand-new
        group, or the committed offset no longer exists):
        latest (default): consume only messages produced after this consumer started
        earliest: start from the beginning the first time, then resume from the
                  committed offset afterwards (unlike consumer.seekToBeginning,
                  which rewinds to the start on every run)
        */
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // Disable auto-commit: offsets are committed manually below.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Max records returned by a single poll; tune to processing speed.
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);
        // Interval between consumer heartbeats to the broker.
        properties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000);
        // If the broker misses heartbeats for 10s it evicts this consumer from
        // the group and rebalances its partitions to other consumers.
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10 * 1000);
        // If two polls are more than 30s apart, the consumer is deemed too
        // slow and is evicted from the group (rebalance).
        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 30 * 1000);

        // 2. create the consumer with the configuration above
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        // 3. subscribe to the topic
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        // 4. poll loop
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Process each record of the batch.
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("收到消息:partition = %d,offset = %d, key = %s, value = %s%n", record.partition(),
                        record.offset(), record.key(), record.value());
            }
            // Commit manually once the batch has been processed.
            if (records.count() > 0) {
                // 1. manual synchronous commit: blocks until the broker acks.
                // Chosen here because no further work follows the commit.
                consumer.commitSync();

                // 2. manual asynchronous commit alternative:
//                consumer.commitAsync(new OffsetCommitCallback() {
//                    @Override
//                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> map, Exception e) {
//                        if (e != null) {
//                            System.err.println("Commit failed for " + map);
//                            // (fixed: original printed e.getStackTrace(), an array)
//                            e.printStackTrace();
//                        }
//                    }
//                });

            }
        }

    }
}

(原文此处为消费者消费消息的运行结果截图,转载时图片缺失)

版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 举报,一经查实,本站将立刻删除。

文章由极客之音整理,本文链接:https://www.bmabk.com/index.php/post/65638.html

(0)
小半的头像小半

相关推荐

极客之音——专业性很强的中文编程技术网站,欢迎收藏到浏览器,订阅我们!