• k8s 单节点跑kafka


    [root@master-01 kafka]# cat kafka.yaml
    # Headless service (clusterIP: None): gives every Kafka pod a stable
    # per-pod DNS record; referenced by the StatefulSet via serviceName.
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: kafka
      name: kafka-svc
      namespace: uat
    spec:
      clusterIP: None
      selector:
        app: kafka
      ports:
        - name: server
          port: 9092
    ---
    # PodDisruptionBudget: keep at least 2 of the 3 brokers up during
    # voluntary disruptions (node drains, etc.).
    # policy/v1beta1 was removed in Kubernetes 1.25; policy/v1 is the stable
    # API (available since 1.21) and accepts the same spec fields used here.
    apiVersion: policy/v1
    kind: PodDisruptionBudget
    metadata:
      name: kafka-pdb
      namespace: uat
    spec:
      selector:
        matchLabels:
          app: kafka
      minAvailable: 2
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: kafka
      namespace: uat
    spec:
      selector:
        matchLabels:
          app: kafka
      serviceName: kafka-svc
      replicas: 3  # number of Kafka brokers in the cluster
      template:
        metadata:
          labels:
            app: kafka
        spec:
          nodeSelector:
            env: uat
          # Soft affinity: prefer co-locating brokers with ZooKeeper pods
          # (app=zk); scheduling still succeeds when no zk pod matches.
          affinity:
            podAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 1
                podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                    - key: app
                      operator: In
                      values:
                      - zk
                  topologyKey: kubernetes.io/hostname
          # Generous grace period so Kafka can complete controlled shutdown
          # and flush logs before receiving SIGKILL.
          terminationGracePeriodSeconds: 300
          imagePullSecrets:
          - name: registry-key
          containers:
          - name: k8s-kafka
            imagePullPolicy: IfNotPresent
            # image: registry.cn-hangzhou.aliyuncs.com/ccgg/k8skafka:v1
            image: wurstmeister/kafka:2.13-2.7.0
            # image: mirrorgooglecontainers/kubernetes-kafka:1.0-10.2.1
            resources:
              requests:
                memory: "1024Mi"
                cpu: 500m
            ports:
            - containerPort: 9092
              name: server
            # broker.id is taken from the pod hostname's ordinal suffix
            # (kafka-0 -> 0, kafka-1 -> 1, ...), stable across restarts
            # because this is a StatefulSet.
            # NOTE(review): inter.broker.protocol.version=2.2.0 is pinned
            # below a 2.7.0 image — presumably a leftover from a rolling
            # upgrade; confirm before raising it.
            command:
            - 'sh'
            - '-c'
            - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-}
              --override listeners=PLAINTEXT://:9092
              --override zookeeper.connect=zk-0.zk-hs.uat.svc.cluster.local:2181,zk-1.zk-hs.uat.svc.cluster.local:2181,zk-2.zk-hs.uat.svc.cluster.local:2181
              --override log.dirs=/var/lib/kafka/${HOSTNAME}
              --override auto.create.topics.enable=true
              --override delete.topic.enable=true
              --override auto.leader.rebalance.enable=true
              --override background.threads=10
              --override compression.type=producer
              --override leader.imbalance.check.interval.seconds=300
              --override leader.imbalance.per.broker.percentage=10
              --override log.flush.interval.messages=9223372036854775807
              --override log.flush.offset.checkpoint.interval.ms=60000
              --override log.flush.scheduler.interval.ms=9223372036854775807
              --override log.retention.bytes=-1
              --override log.retention.hours=72
              --override log.roll.hours=168
              --override log.roll.jitter.hours=0
              --override log.segment.bytes=1073741824
              --override log.segment.delete.delay.ms=60000
              --override message.max.bytes=1000012
              --override min.insync.replicas=1
              --override num.io.threads=8
              --override num.network.threads=3
              --override num.recovery.threads.per.data.dir=1
              --override num.replica.fetchers=1
              --override offset.metadata.max.bytes=4096
              --override offsets.commit.required.acks=-1
              --override offsets.commit.timeout.ms=5000
              --override offsets.load.buffer.size=5242880
              --override offsets.retention.check.interval.ms=600000
              --override offsets.retention.minutes=1440
              --override offsets.topic.compression.codec=0
              --override offsets.topic.num.partitions=50
              --override offsets.topic.replication.factor=3
              --override offsets.topic.segment.bytes=104857600
              --override queued.max.requests=500
              --override quota.consumer.default=9223372036854775807
              --override quota.producer.default=9223372036854775807
              --override replica.fetch.min.bytes=1
              --override replica.fetch.wait.max.ms=500
              --override replica.high.watermark.checkpoint.interval.ms=5000
              --override replica.lag.time.max.ms=10000
              --override replica.socket.receive.buffer.bytes=65536
              --override replica.socket.timeout.ms=30000
              --override request.timeout.ms=30000
              --override socket.receive.buffer.bytes=102400
              --override socket.request.max.bytes=104857600
              --override socket.send.buffer.bytes=102400
              --override unclean.leader.election.enable=true
              --override zookeeper.session.timeout.ms=6000
              --override zookeeper.set.acl=false
              --override broker.id.generation.enable=true
              --override connections.max.idle.ms=600000
              --override controlled.shutdown.enable=true
              --override controlled.shutdown.max.retries=3
              --override controlled.shutdown.retry.backoff.ms=5000
              --override controller.socket.timeout.ms=30000
              --override default.replication.factor=2
              --override fetch.purgatory.purge.interval.requests=1000
              --override group.max.session.timeout.ms=300000
              --override group.min.session.timeout.ms=6000
              --override inter.broker.protocol.version=2.2.0
              --override log.cleaner.backoff.ms=15000
              --override log.cleaner.dedupe.buffer.size=134217728
              --override log.cleaner.delete.retention.ms=86400000
              --override log.cleaner.enable=true
              --override log.cleaner.io.buffer.load.factor=0.9
              --override log.cleaner.io.buffer.size=524288
              --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308
              --override log.cleaner.min.cleanable.ratio=0.5
              --override log.cleaner.min.compaction.lag.ms=0
              --override log.cleaner.threads=1
              --override log.cleanup.policy=delete
              --override log.index.interval.bytes=4096
              --override log.index.size.max.bytes=10485760
              --override log.message.timestamp.difference.max.ms=9223372036854775807
              --override log.message.timestamp.type=CreateTime
              --override log.preallocate=false
              --override log.retention.check.interval.ms=300000
              --override max.connections.per.ip=2147483647
              --override num.partitions=4
              --override producer.purgatory.purge.interval.requests=1000
              --override replica.fetch.backoff.ms=1000
              --override replica.fetch.max.bytes=1048576
              --override replica.fetch.response.max.bytes=10485760
              --override reserved.broker.max.id=1000"
            env:
            - name: KAFKA_HEAP_OPTS
              value: "-Xmx1024M -Xms512M"
            - name: KAFKA_OPTS
              value: "-Dlogging.level=INFO"
            volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
            # Mount the node's timezone file so container logs use host time.
            - name: host-time
              mountPath: /etc/localtime
            readinessProbe:
              tcpSocket:
                port: 9092
              timeoutSeconds: 1
              initialDelaySeconds: 5
          # Run as root so the broker can write to the hostPath data dir.
          securityContext:
            runAsUser: 0
            fsGroup: 0
          volumes:
          # - name: datadir
          #   persistentVolumeClaim:
          #     claimName: pvc-nas-uat
          - name: datadir
            hostPath:
              path: /store/logs/uat/kafka
              type: DirectoryOrCreate
          - name: host-time
            hostPath:
              path: /etc/localtime
    
  • 相关阅读:
    二.线性表
    一.绪论
    托管服务器代码
    jquery easyui DataGrid 数据表格 属性
    用socket 模拟http请求
    struct和class的相同点与不同点
    c++中的基本数据类型
    当函数返回一个局部变量的指针问题,内存是否被释放
    Python首先生成包含1000个随机字符的字符串,然后统计每个字符的出现次数。(使用字典)
    浅谈结构体字节的求法
  • 原文地址:https://www.cnblogs.com/Applogize/p/15419406.html
Copyright © 2020-2023  润新知