LiFengMing LiFengMing
首页
云原生
中间件
工具导航
资源导航
  • 分类
  • 标签
  • 归档
关于作者
GitHub (opens new window)

LiFengMing

IT届李哥
首页
云原生
中间件
工具导航
资源导航
  • 分类
  • 标签
  • 归档
关于作者
GitHub (opens new window)
  • MongoDB

  • DTS

  • Kafka

    • kafka相关文档导航
    • docker-compose 快速搭建kafka集群
      • kafka版本迭代说明
    • 中间件
    • Kafka
    LiFengMing
    2023-03-02
    目录

    docker-compose 快速搭建kafka集群

    问题

    最近,在kafka部署的时候遇到一些坑,这里写下这篇文章沉淀。这里主要记录两种搭建方式:

    • 无密码高可用kafka搭建
    • 有密码高可用kafka搭建

    # 无密码高可用kafka搭建

    install_kafka.sh脚本内容如下:

    #!/bin/bash
    # install_kafka.sh — bootstrap a 3-node ZooKeeper + 3-broker Kafka cluster
    # (no authentication) with docker-compose.
    # Usage: sh install_kafka.sh <host-ip>

    set -e
    # Host IP advertised to Kafka clients outside the docker network.
    IP_ADDR=$1

    ZOOKEEPER_IMAGE=zookeeper:3.5.9
    KAFKA_IMAGE=confluentinc/cp-kafka:5.5.0


    # Quoted -z test replaces the unquoted `[ ! $IP_ADDR ]`, which misparses
    # when the argument is empty-with-quotes or contains spaces.
    if [ -z "$IP_ADDR" ];then
    echo "address is empty, please input the ip address of this host."
    exit 1
    fi

    # Absolute directory containing this script; root for all generated files.
    CURR_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

    # Install docker-compose only when it is not already on PATH.
    # `command -v` is the standard probe; the previous
    # `docker-compose -version | awk ...` + `${DC_EXIST%?}` dance was fragile.
    if ! command -v docker-compose >/dev/null 2>&1; then
    curl -L "https://github.com/docker/compose/releases/download/1.29.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
    ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
    fi

    # Verify the binary actually runs. The failure branch must be attached to
    # the `if` itself: under `set -e` a separate `[ $? -ne 0 ]` check after a
    # bare failing command is dead code — the script would already have exited.
    if ! docker-compose --version; then
    echo "docker-compose install failed, please check."
    exit 1
    fi
    #Create network for zookeeper-kafka cluster
    # Reuse the docker network if it already exists, otherwise create it.
    zknet=$(docker network ls | grep zookeeper_kafka | awk '{print $2}')
    if [ -n "$zknet" ]
    then
        echo 'The zookeeper_kafka is already existed.'
    else
        echo 'Create zookeeper_kafka.'
        docker network create --subnet 172.30.0.0/16 zookeeper_kafka
    fi

    # Host directories bind-mounted into the containers by the compose file.
    # $CURR_DIR is quoted so a path containing spaces cannot word-split.
    echo "create local dir for kafka cluster"
    mkdir -p "$CURR_DIR"/kafka_data/kafka{0,1,2}/data

    echo "create local dir for zookeeper cluster"
    mkdir -p "$CURR_DIR"/zookeeper_data/zookeeper{0,1,2}/{data,datalog}
    # Write the compose file only if absent, so manual edits survive re-runs.
    # The unquoted EOF delimiter is intentional: $ZOOKEEPER_IMAGE, $KAFKA_IMAGE,
    # $CURR_DIR and $IP_ADDR are expanded while the file is generated.
    if [ ! -e "$CURR_DIR/kafka-compose.yaml" ];then
    cat > "$CURR_DIR/kafka-compose.yaml" << EOF
    version: '3.3'

    services:
      zookeeper0:
        image: $ZOOKEEPER_IMAGE
        restart: always
        hostname: zookeeper0
        container_name: zookeeper0
        ports:
        - 12181:2181
        volumes:
        - $CURR_DIR/zookeeper_data/zookeeper0/data:/data
        - $CURR_DIR/zookeeper_data/zookeeper0/datalog:/datalog
        environment:
          ZOO_MY_ID: 1
          ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zookeeper1:2888:3888;2181 server.3=zookeeper2:2888:3888;2181
      zookeeper1:
        image: $ZOOKEEPER_IMAGE
        restart: always
        hostname: zookeeper1
        container_name: zookeeper1
        ports:
        - 12182:2181
        volumes:
        - $CURR_DIR/zookeeper_data/zookeeper1/data:/data
        - $CURR_DIR/zookeeper_data/zookeeper1/datalog:/datalog
        environment:
          ZOO_MY_ID: 2
          ZOO_SERVERS: server.1=zookeeper0:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zookeeper2:2888:3888;2181
      zookeeper2:
        image: $ZOOKEEPER_IMAGE
        restart: always
        hostname: zookeeper2
        container_name: zookeeper2
        ports:
        - 12183:2181
        volumes:
        - $CURR_DIR/zookeeper_data/zookeeper2/data:/data
        - $CURR_DIR/zookeeper_data/zookeeper2/datalog:/datalog
        environment:
          ZOO_MY_ID: 3
          ZOO_SERVERS: server.1=zookeeper0:2888:3888;2181 server.2=zookeeper1:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
      kafka0:
        image: $KAFKA_IMAGE
        restart: always
        depends_on:
          - zookeeper0
          - zookeeper1
          - zookeeper2
        container_name: kafka0
        ports:
          - 19092:9092
        environment:
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://$IP_ADDR:19092
          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
          KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
          KAFKA_BROKER_ID: 0
          KAFKA_LOG_DIRS: /opt/kafka/data/logs
        volumes:
          - $CURR_DIR/kafka_data/kafka0/data:/opt/kafka/data
      kafka1:
        image: $KAFKA_IMAGE
        restart: always
        depends_on:
          - zookeeper0
          - zookeeper1
          - zookeeper2
        container_name: kafka1
        ports:
          - 19093:9093
        environment:
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://$IP_ADDR:19093
          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9093
          KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
          KAFKA_BROKER_ID: 1
          KAFKA_LOG_DIRS: /opt/kafka/data/logs
        volumes:
          - $CURR_DIR/kafka_data/kafka1/data:/opt/kafka/data
      kafka2:
        image: $KAFKA_IMAGE
        restart: always
        depends_on:
          - zookeeper0
          - zookeeper1
          - zookeeper2
        container_name: kafka2
        ports:
          - 19094:9094
        environment:
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://$IP_ADDR:19094
          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9094
          KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
          KAFKA_BROKER_ID: 2
          KAFKA_LOG_DIRS: /opt/kafka/data/logs
        volumes:
          - $CURR_DIR/kafka_data/kafka2/data:/opt/kafka/data
    networks:
      default:
        external:
          name: zookeeper_kafka
    EOF
    fi
    # Generate startup helper: brings the cluster up detached, then shows status.
    # $CURR_DIR expands now (unquoted delimiter), baking absolute paths in.
    cat > "$CURR_DIR/startup.sh" <<EOF
    #!/bin/bash
    
    docker-compose -f $CURR_DIR/kafka-compose.yaml up -d
    
    docker-compose -f $CURR_DIR/kafka-compose.yaml ps
    
    EOF
    
    # Generate stop helper: tears the cluster down.
    cat > "$CURR_DIR/stop.sh" <<EOF
    #!/bin/bash
    
    docker-compose -f $CURR_DIR/kafka-compose.yaml down
    
    EOF
    
    # Make the generated helper scripts executable.
    chmod +x "$CURR_DIR"/*.sh
    
    
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    95
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    1. 准备脚本

    sh install_kafka.sh <你的主机IP>

    2. 部署

    sh startup.sh

    3. 停止

    sh stop.sh

    # 有密码高可用kafka搭建

    
    #!/bin/bash
    # Bootstrap a SASL/SCRAM-secured 3-node ZooKeeper + 3-broker Kafka cluster
    # with docker-compose. Usage: sh install_kafka.sh <host-ip>
    set -e
     
    #image
    KAFKA_IMAGE=confluentinc/cp-kafka:5.5.0
    ZOOKEEPER_IMAGE=zookeeper:3.5.5
      
    # Host IP advertised to Kafka clients outside the docker network.
    # Quoted -z test replaces the unquoted `[ ! $IP_ADDR ]`, which misparses
    # when the argument contains spaces.
    IP_ADDR=$1
    if [ -z "$IP_ADDR" ];then
    echo "address is empty, please input the ip address of this host."
    exit 1
    fi
      
    # Absolute directory containing this script; root for all generated files.
    CURR_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
     
    # Install docker-compose only when it is not already on PATH
    # (`command -v` is the standard, portable probe).
    if ! command -v docker-compose >/dev/null 2>&1; then
    curl -L "https://github.com/docker/compose/releases/download/1.29.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
    ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
    fi
     
    # Verify the binary actually runs; checking via `if !` keeps this branch
    # reachable under `set -e` (a later `[ $? -ne 0 ]` would be dead code).
    if ! docker-compose --version; then
    echo "docker-compose install failed, please check."
    exit 1
    fi
    #Create network for zookeeper-kafka cluster
    # Reuse the docker network if it already exists, otherwise create it.
    zknet=$(docker network ls | grep zookeeper_kafka | awk '{print $2}')
    if [ -n "$zknet" ]
    then
        echo 'The zookeeper_kafka is already existed.'
    else
        echo 'Create zookeeper_kafka.'
        docker network create --subnet 172.30.0.0/16 zookeeper_kafka
    fi
      
    # Host directories bind-mounted into the containers by the compose file.
    # $CURR_DIR is quoted so a path containing spaces cannot word-split.
    echo "create local dir for kafka cluster"
    mkdir -p "$CURR_DIR"/kafka_data/kafka{0,1,2}/data
      
    echo "create local dir for zookeeper cluster"
    mkdir -p "$CURR_DIR"/zookeeper_data/zookeeper{0,1,2}/{data,datalog}
     
    # JAAS configuration directories mounted into the kafka/zookeeper containers.
    mkdir -p "$CURR_DIR"/conf/security/{kafka,zookeeper}
    # Write the compose file only if absent, so manual edits survive re-runs.
    # The unquoted EOF delimiter is intentional: image, path and IP variables
    # are expanded while the file is generated.
    # FIX: zookeeper1 previously set ZOO_TICK_TIME: 20000 — ten times the 2000
    # used by zookeeper0/zookeeper2 and by zoo.cfg (tickTime=2000), which would
    # skew session/sync timeouts on that one node. Normalized to 2000.
    if [ ! -e "$CURR_DIR/kafka-compose.yaml" ];then
    cat > "$CURR_DIR/kafka-compose.yaml" << EOF
    version: '3.3'

    services:
      zookeeper0:
        image: $ZOOKEEPER_IMAGE
        restart: always
        hostname: zookeeper0
        container_name: zookeeper0
        ports:
        - 12181:2181
        volumes:
        - $CURR_DIR/zookeeper_data/zookeeper0/data:/data
        - $CURR_DIR/zookeeper_data/zookeeper0/datalog:/datalog
        - $CURR_DIR/conf/security/zookeeper:/conf/jaas
        - $CURR_DIR/conf/zoo.cfg:/conf/zoo.cfg
        environment:
          ZOO_MY_ID: 1
          ZOO_AUTOPURGE_PURGEINTERVAL: 0
          ZOO_AUTOPURGE_SNAPRETAINCOUNT: 3
          ZOO_4LW_COMMANDS_WHITELIST: "*"
          ZOO_MAX_CLIENT_CNXNS: 60
          ZOO_STANDALONE_ENABLED: "false"
          ZOO_TICK_TIME: 2000
          ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zookeeper1:2888:3888;2181 server.3=zookeeper2:2888:3888;2181
          JVMFLAGS: -Djava.security.auth.login.config=/conf/jaas/kafka_zk_jaas.conf
      zookeeper1:
        image: $ZOOKEEPER_IMAGE
        restart: always
        hostname: zookeeper1
        container_name: zookeeper1
        ports:
        - 12182:2181
        volumes:
        - $CURR_DIR/zookeeper_data/zookeeper1/data:/data
        - $CURR_DIR/zookeeper_data/zookeeper1/datalog:/datalog
        - $CURR_DIR/conf/security/zookeeper:/conf/jaas
        - $CURR_DIR/conf/zoo.cfg:/conf/zoo.cfg
        environment:
          ZOO_MY_ID: 2
          ZOO_AUTOPURGE_PURGEINTERVAL: 0
          ZOO_AUTOPURGE_SNAPRETAINCOUNT: 3
          ZOO_4LW_COMMANDS_WHITELIST: "*"
          ZOO_MAX_CLIENT_CNXNS: 60
          ZOO_STANDALONE_ENABLED: "false"
          ZOO_TICK_TIME: 2000
          ZOO_SERVERS: server.1=zookeeper0:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zookeeper2:2888:3888;2181
          JVMFLAGS: -Djava.security.auth.login.config=/conf/jaas/kafka_zk_jaas.conf
      zookeeper2:
        image: $ZOOKEEPER_IMAGE
        restart: always
        hostname: zookeeper2
        container_name: zookeeper2
        ports:
        - 12183:2181
        volumes:
        - $CURR_DIR/zookeeper_data/zookeeper2/data:/data
        - $CURR_DIR/zookeeper_data/zookeeper2/datalog:/datalog
        - $CURR_DIR/conf/security/zookeeper:/conf/jaas
        - $CURR_DIR/conf/zoo.cfg:/conf/zoo.cfg
        environment:
          ZOO_MY_ID: 3
          ZOO_AUTOPURGE_PURGEINTERVAL: 0
          ZOO_AUTOPURGE_SNAPRETAINCOUNT: 3
          ZOO_4LW_COMMANDS_WHITELIST: "*"
          ZOO_MAX_CLIENT_CNXNS: 60
          ZOO_STANDALONE_ENABLED: "false"
          ZOO_TICK_TIME: 2000
          ZOO_SERVERS: server.1=zookeeper0:2888:3888;2181 server.2=zookeeper1:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
          JVMFLAGS: -Djava.security.auth.login.config=/conf/jaas/kafka_zk_jaas.conf
      kafka0:
        image: $KAFKA_IMAGE
        restart: always
        depends_on:
          - zookeeper0
          - zookeeper1
          - zookeeper2
        container_name: kafka0
        ports:
          - 19092:9092
        environment:
          KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://$IP_ADDR:19092
          KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9092
          KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
          KAFKA_BROKER_ID: 0
          KAFKA_LOG_DIRS: /opt/kafka/data/logs
          KAFKA_SUPER_USERS: User:admin;User:hxdts
          KAFKA_ZOOKEEPER_SASL_CLIENT: "false"
          KAFKA_ZOOKEEPER_SET_ACL: "true"
          KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
          KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
          KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
          KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "SASL_PLAINTEXT"
          KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
          KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
          KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.auth.SimpleAclAuthorizer"
          KAFKA_OPTS: -Djava.security.auth.login.config=/conf/kafka/jaas/kafka-server-jaas.conf
        volumes:
          - $CURR_DIR/kafka_data/kafka0/data:/opt/kafka/data
          - $CURR_DIR/conf/security/kafka:/conf/kafka/jaas
      kafka1:
        image: $KAFKA_IMAGE
        restart: always
        depends_on:
          - zookeeper0
          - zookeeper1
          - zookeeper2
        container_name: kafka1
        ports:
          - 19093:9093
        environment:
          KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://$IP_ADDR:19093
          KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9093
          KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
          KAFKA_BROKER_ID: 1
          KAFKA_LOG_DIRS: /opt/kafka/data/logs
          KAFKA_SUPER_USERS: User:admin;User:hxdts
          KAFKA_ZOOKEEPER_SASL_CLIENT: "false"
          KAFKA_ZOOKEEPER_SET_ACL: "true"
          KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
          KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
          KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
          KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "SASL_PLAINTEXT"
          KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
          KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
          KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.auth.SimpleAclAuthorizer"
          KAFKA_OPTS: -Djava.security.auth.login.config=/conf/kafka/jaas/kafka-server-jaas.conf
        volumes:
          - $CURR_DIR/kafka_data/kafka1/data:/opt/kafka/data
          - $CURR_DIR/conf/security/kafka:/conf/kafka/jaas
      kafka2:
        image: $KAFKA_IMAGE
        restart: always
        depends_on:
          - zookeeper0
          - zookeeper1
          - zookeeper2
        container_name: kafka2
        ports:
          - 19094:9094
        environment:
          KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://$IP_ADDR:19094
          KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9094
          KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
          KAFKA_BROKER_ID: 2
          KAFKA_LOG_DIRS:  /opt/kafka/data/logs
          KAFKA_SUPER_USERS: User:admin;User:hxdts
          KAFKA_ZOOKEEPER_SASL_CLIENT: "false"
          KAFKA_ZOOKEEPER_SET_ACL: "true"
          KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
          KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
          KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
          KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "SASL_PLAINTEXT"
          KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
          KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
          KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.auth.SimpleAclAuthorizer"
          KAFKA_OPTS: -Djava.security.auth.login.config=/conf/kafka/jaas/kafka-server-jaas.conf
        volumes:
          - $CURR_DIR/kafka_data/kafka2/data:/opt/kafka/data
          - $CURR_DIR/conf/security/kafka:/conf/kafka/jaas
    networks:
      default:
        external:
          name: zookeeper_kafka
    EOF
    fi
    # Generate startup helper: brings the cluster up detached, then shows status.
    cat > "$CURR_DIR/startup.sh" <<EOF
    #!/bin/bash
      
    docker-compose -f $CURR_DIR/kafka-compose.yaml up -d
     
    docker-compose -f $CURR_DIR/kafka-compose.yaml ps
      
    EOF
      
    # Generate stop helper: tears the cluster down.
    cat > "$CURR_DIR/stop.sh" <<EOF
    #!/bin/bash
      
    docker-compose -f $CURR_DIR/kafka-compose.yaml down
      
    EOF
      
    # Make the generated helper scripts executable.
    chmod +x "$CURR_DIR"/*.sh
     
     
    # ZooKeeper server-side JAAS: defines user "admin" for SASL/DIGEST clients.
    # NOTE(review): the password is hardcoded here and in the Kafka JAAS file
    # below — consider injecting it via an argument or environment variable.
    cat > "$CURR_DIR/conf/security/zookeeper/kafka_zk_jaas.conf" <<EOF
    Server {
        org.apache.zookeeper.server.auth.DigestLoginModule required
        user_admin="kafka123456";
    };
    EOF
     
    # Kafka JAAS: broker (SCRAM), client (SCRAM) and the brokers' own
    # ZooKeeper connection (DIGEST) all authenticate as "admin".
    cat > "$CURR_DIR/conf/security/kafka/kafka-server-jaas.conf" <<EOF
    KafkaServer {
        org.apache.kafka.common.security.scram.ScramLoginModule required
        username="admin"
        password="kafka123456";
    };
     
    KafkaClient {
        org.apache.kafka.common.security.scram.ScramLoginModule required
        username="admin"
        password="kafka123456";
    };
     
    Client {
        org.apache.zookeeper.server.auth.DigestLoginModule required
        username="admin"
        password="kafka123456";
    };
    EOF
     
    # Shared zoo.cfg mounted into every ZooKeeper container.
    # NOTE(review): dataLogDir=/data/log, but the compose file mounts the host
    # datalog dir at /datalog — confirm which path is intended. Also
    # standaloneEnabled=true conflicts with ZOO_STANDALONE_ENABLED: "false"
    # in the compose environment — verify which setting wins at runtime.
    cat > "$CURR_DIR/conf/zoo.cfg" <<EOF
    clientPort=2181
    dataDir=/data
    dataLogDir=/data/log
    tickTime=2000
    initLimit=10
    syncLimit=10
    maxClientCnxns=60
    minSessionTimeout= 4000
    maxSessionTimeout= 40000
    autopurge.snapRetainCount=3
    autopurge.purgeInterval=0
    4lw.commands.whitelist=*
    authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
    requireClientAuthScheme=sasl
    standaloneEnabled=true
    admin.enableServer=true
    server.1=zookeeper0:2888:3888;2181
    server.2=zookeeper1:2888:3888;2181
    server.3=zookeeper2:2888:3888;2181
    EOF
       
    
    
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    95
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    176
    177
    178
    179
    180
    181
    182
    183
    184
    185
    186
    187
    188
    189
    190
    191
    192
    193
    194
    195
    196
    197
    198
    199
    200
    201
    202
    203
    204
    205
    206
    207
    208
    209
    210
    211
    212
    213
    214
    215
    216
    217
    218
    219
    220
    221
    222
    223
    224
    225
    226
    227
    228
    229
    230
    231
    232
    233
    234
    235
    236
    237
    238
    239
    240
    241
    242
    243
    244
    245
    246
    247
    248
    249
    250
    251
    252
    253
    254
    255
    256
    257
    258
    259
    260
    261
    262
    263
    264
    265
    266
    267
    268
    269
    270
    271
    272
    273
    274
    275
    276
    277
    278
    279
    280
    281
    282
    283
    284
    285
    286
    287
    288
    289
    290
    291
    292
    293
    294
    295
    1. 准备脚本

    sh install_kafka.sh <你的主机IP>

    2. 部署

    sh startup.sh

    3. 停止

    sh stop.sh

    编辑 (opens new window)
    #project
    上次更新: 2025/01/19, 23:15:59
    kafka相关文档导航
    kafka版本迭代说明

    ← kafka相关文档导航 kafka版本迭代说明→

    最近更新
    01
    云原生资源
    05-25
    02
    快速搭建Spring项目
    03-27
    03
    kafka版本迭代说明
    03-11
    更多文章>
    Theme by Vdoing | Copyright © 2018-2025 LiFengMing | MIT License
    • 跟随系统
    • 浅色模式
    • 深色模式
    • 阅读模式