• Using the Kafka API from Java


    Producer:

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    import java.util.Properties;

    public class KafkaRelayUtil {
        private static Properties props = null;
        private static KafkaProducer<String, byte[]> producer = null;
        static {
            props = new Properties();
            // Cluster address; separate multiple brokers with ","
            props.put("bootstrap.servers", "192.168.0.181:9092");
            //props.put("bootstrap.servers", "10.122.49.173:9092,10.122.49.195:9092,10.122.49.196:9092");
            // Key/value serialization: a String key and a byte[] value, using Kafka's built-in serializers
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            //props.put("partitioner.class", "com.kafka.demo.Partitioner"); // custom partitioner, not used here
            // Wait for the partition leader to acknowledge each write
            // (the old 0.8 name "request.required.acks" is ignored by the new producer; use "acks")
            props.put("acks", "1");
            producer = new KafkaProducer<>(props);
        }

        public static void push(String topicName, byte[] byteArray) {
            // Send the byte array to the given topic; send() is asynchronous (fire-and-forget here)
            ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>(topicName, byteArray);
            producer.send(producerRecord);
        }
    }
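
    A minimal caller, for illustration only (the topic name and payload below are placeholders, not from the original post):

    public class KafkaRelayUtilDemo {
        public static void main(String[] args) {
            // Hypothetical payload; in the original the bytes come from application data
            byte[] payload = "hello kafka".getBytes(java.nio.charset.StandardCharsets.UTF_8);
            // "gbdata" is one of the topics the consumer below subscribes to
            KafkaRelayUtil.push("gbdata", payload);
            // Note: send() is asynchronous; since KafkaRelayUtil keeps the producer private,
            // a real utility would also expose a flush()/close() method for clean shutdown
        }
    }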

    Consumer:

    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    
    import java.util.Arrays;
    import java.util.Properties;
    
    public class KafKaListener {
    
        public KafKaListener() {
        }
    
        private static Properties props = null;
        private static KafkaConsumer<String, String> consumer;
        static {
            props = new Properties();
            //props.put("bootstrap.servers", "192.168.0.181:9092");
            props.put("bootstrap.servers", "10.122.49.173:9092,10.122.49.195:9092,10.122.49.196:9092");
            // Consumer group id
            props.put("group.id", "demo1");
            // If true, consumed offsets are committed back to Kafka automatically;
            // if this consumer dies, a new consumer in the same group resumes from the committed offset
            props.put("enable.auto.commit", "true");
            // How often offsets are auto-committed
            props.put("auto.commit.interval.ms", "10000");
            props.put("session.timeout.ms", "10000");
            props.put("max.poll.records", "1000");
            // Deserialization
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    
            consumer = new KafkaConsumer<>(props);
        }
    
        public void run() {
            consumer.subscribe(Arrays.asList("gbdata", "gbobd", "qygp", "qydp", "carlogin"));
            // Keep polling
            while (true) {
                // Block for up to 2 seconds waiting for records (newer clients take a Duration)
                ConsumerRecords<String, String> records = consumer.poll(2000);
    
                // Hand the polled batch to a worker thread; KafKaThread is application code, shown below
                KafKaThread kafKaThread = new KafKaThread(records);
                kafKaThread.start();
            }
        }
    }
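
    The post references KafKaThread but never shows it. A minimal sketch of such a worker, assuming it simply iterates the polled batch (the JSON parsing and MyBatis persistence hinted at by the original imports are omitted):

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;

    // Hypothetical worker; the real KafKaThread presumably parses each value as JSON
    // and writes it to the database pools imported in the original listing
    public class KafKaThread extends Thread {
        private final ConsumerRecords<String, String> records;

        public KafKaThread(ConsumerRecords<String, String> records) {
            this.records = records;
        }

        @Override
        public void run() {
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("topic=%s, partition=%d, offset=%d, value=%s%n",
                        record.topic(), record.partition(), record.offset(), record.value());
            }
        }
    }

    One caveat with this design: with enable.auto.commit=true, offsets can be committed before a worker thread finishes its batch, so records may be lost if the process crashes mid-processing. Processing in the polling thread, or committing manually after the batch completes, is the safer pattern.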
  • Original post: https://www.cnblogs.com/TheGreatDead/p/11136556.html