
Hi, I'm currently setting up Kafka with Docker. I've managed to get Zookeeper and Kafka running using the published Confluent images; see the following docker-compose file:

version: '2'

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:3.2.0
    container_name: zookeeper
    hostname: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    restart: always

  kafka:
    image: confluentinc/cp-kafka:3.2.0
    hostname: kafka
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - '9092:9092'
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.99.100:9092
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
    restart: always

  kafka-rest:
    image: confluentinc/cp-kafka-rest:3.2.0
    container_name: kafka-rest
    depends_on:
      - kafka
    ports:
      - '8082:8082'
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_REST_LISTENERS: http://kafka-rest:8082
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      KAFKA_REST_HOST_NAME: kafka-rest
    restart: always

  schema-registry:
    image: confluentinc/cp-schema-registry:3.2.0
    container_name: schema-registry
    depends_on:
      - kafka
    ports:
      - '8081'
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_LISTENERS: http://schema-registry:8081
    restart: always

  connect:
    image: confluentinc/cp-kafka-connect:3.2.0
    container_name: kafka-connect
    depends_on:
      - zookeeper
      - kafka
      - schema-registry
    ports:
      - "8083:8083"
    restart: always
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'kafka:9092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"

Now, I've successfully exposed the Kafka container to my non-dockerized application by setting the advertised.listeners property to PLAINTEXT://{DOCKER_MACHINE_IP}:9092. But as you can see, I've also added further Confluent applications to extend my Kafka setup (Kafka REST, Schema Registry). Because of that advertised.listeners value, these can no longer connect to my Kafka instance.

I could change it to the container hostname instead --> PLAINTEXT://kafka:9092, but then my other (non-dockerized) applications can no longer reach the Kafka instance. Is there a simple way to solve this?
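For context, this is roughly how my non-dockerized application reaches the broker today through the Docker machine IP; a minimal host-side sketch assuming the kafka-python client is installed (the topic name is just an example):

# Host-side producer; this only works while advertised.listeners points at the
# Docker machine IP. The Confluent containers are handed that same address.
from kafka import KafkaProducer  # assumption: pip install kafka-python

producer = KafkaProducer(bootstrap_servers="192.168.99.100:9092")
producer.send("test-topic", b"hello from the host")  # "test-topic" is illustrative
producer.flush()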


3 Answers


Omar, maybe you have already solved your problem, but for future reference, Hans Jespersen's comment worked for me, even on Windows.

Open C:\Windows\System32\drivers\etc\hosts as administrator and add the following line to expose the Kafka broker as localhost:

127.0.0.1  broker

My docker-compose.yml file looks like this:

---
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper
    hostname: zookeeper
    extra_hosts:
    - "moby:127.0.0.1"
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  broker:
    image: confluentinc/cp-kafka
    hostname: broker
    extra_hosts:
    - "moby:127.0.0.1"
    depends_on:
      - zookeeper
    ports:
      - '9092:9092'
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:9092'
      KAFKA_DEFAULT_REPLICATION_FACTOR: 1

  schema_registry:
    image: confluentinc/cp-schema-registry
    hostname: schema_registry
    # extra_hosts:
    # - "moby:127.0.0.1"
    depends_on:
      - zookeeper
      - broker
    ports:
      - '8081:8081'
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema_registry
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'

  kafka-rest:
    image: confluentinc/cp-kafka-rest
    container_name: kafka-rest
    extra_hosts:
    - "moby:127.0.0.1"
    depends_on:
      - zookeeper
      - broker
    ports:
      - '8082:8082'
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_REST_LISTENERS: http://kafka-rest:8082
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema_registry:8081
      KAFKA_REST_HOST_NAME: kafka-rest

Alternatively, exposing my laptop's current IP address (found with ipconfig /all) also works, but the downside is that whenever my network changes I have to update the docker-compose.yml file as well.
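With the hosts entry in place, an application on the host can bootstrap against the very name the broker advertises, so the same address works inside and outside Docker. A minimal sanity check, assuming the kafka-python client is installed (the topic name is only an example):

# "broker" resolves to 127.0.0.1 on the host via the hosts file, and it is also
# the hostname advertised by the broker inside the Docker network.
from kafka import KafkaProducer  # assumption: pip install kafka-python

producer = KafkaProducer(bootstrap_servers="broker:9092")
producer.send("test-topic", b"same bootstrap address inside and outside Docker")
producer.flush()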

Answered on 2017-05-10T17:37:35.723

Assuming this is a setup for your local development environment, here is a solution that runs inside the Docker network.

version: '2'

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:3.2.0
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    restart: always
    ports: ['2181:2181']

  kafka:
    image: confluentinc/cp-kafka:3.2.0
    depends_on:
    - zookeeper
    ports: ['9092:9092']
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    restart: always

  kafka-rest:
    image: confluentinc/cp-kafka-rest:3.2.0
    depends_on:
    - kafka
    ports: ['8082:8082']
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_REST_LISTENERS: http://0.0.0.0:8082
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      KAFKA_REST_HOST_NAME: localhost
    restart: always

  schema-registry:
    image: confluentinc/cp-schema-registry:3.2.0
    depends_on:
    - kafka        
    ports: ['8081:8081']
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
    restart: always

  connect:
    image: confluentinc/cp-kafka-connect:3.2.0
    depends_on:
    - zookeeper
    - kafka
    - schema-registry
    ports: ['8083:8083']
    restart: always
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'kafka:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"

Updated version here: https://github.com/confluentinc/examples/blob/5.3.1-post/cp-all-in-one/docker-compose.yml
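With the two listeners above, host applications talk to the PLAINTEXT_HOST listener on localhost:9092, while containers on the compose network keep using kafka:29092. A minimal host-side sketch, assuming the kafka-python client is installed (the topic name is illustrative):

# From the host: bootstrap against the PLAINTEXT_HOST listener advertised as localhost:9092.
# A container on the compose network would use bootstrap_servers="kafka:29092" instead.
from kafka import KafkaProducer  # assumption: pip install kafka-python

producer = KafkaProducer(bootstrap_servers="localhost:9092")
producer.send("test-topic", b"hello via the host listener")
producer.flush()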

Answered on 2018-09-30T09:14:47.667

Single Kafka and single Zookeeper

version: '2.1'

services:
    zookeeper:
      image: wurstmeister/zookeeper
      hostname: zookeeper
      container_name: zookeeper
      ports:
        - "2181:2181"
      environment:
        ZOO_MY_ID: 1
        ZOO_PORT: 2181
        ZOO_SERVERS: server.1=zookeeper:2888:3888
      restart: always
      volumes:
        - ./zk-single-kafka-single/zookeeper/data:/data
        - ./zk-single-kafka-single/zookeeper/datalog:/datalog

    kafka:
      image: wurstmeister/kafka
      hostname: kafka
      container_name: kafka
      ports:
        - "9092:9092"
        - "9094:9094"
      environment:
        HOSTNAME_COMMAND: "docker info | grep ^Name: | cut -d' ' -f 2"
        KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka:9092,LISTENER_DOCKER_EXTERNAL://_{HOSTNAME_COMMAND}:9094
        KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
        KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
        KAFKA_LISTENERS: LISTENER_DOCKER_INTERNAL://:9092,LISTENER_DOCKER_EXTERNAL://:9094
        KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
        KAFKA_ADVERTISED_HOST_NAME: 172.19.0.1
        KAFKA_BROKER_ID: 1
        KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
        KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      restart: always
      volumes:
        - ./zk-single-kafka-single/kafka/data:/var/lib/kafka/data
      depends_on:
        - zookeeper
Answered on 2020-07-12T03:50:43.037