I would like to run a Kafka cluster on one of my servers as a set of Docker containers. The containers should not listen on 0.0.0.0, but on the server's internal and external IP addresses, so that my firewall rules continue to work correctly.
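In other words, instead of publishing ports on all interfaces, I want Compose port mappings of the form "HOST_IP:HOST_PORT:CONTAINER_PORT", once per host address, for example (using the same placeholder addresses as in the file below):

    ports:
      - "111.111.111.111:9092:9092"   # external interface of the server
      - "192.168.0.0:9092:9092"       # internal interface of the server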
Below is my current docker-compose.yml:
version: '3'
services:
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - "111.111.111.111:9092:8080"
      - "192.168.0.0:9092:8080"
    depends_on:
      - zookeeper
      - kafka1
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka1:19091
      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
      KAFKA_CLUSTERS_0_JMXPORT: 9997
      SERVER_SERVLET_CONTEXT_PATH: /kafkaui
      AUTH_TYPE: "LOGIN_FORM"
      SPRING_SECURITY_USER_NAME: admin
      SPRING_SECURITY_USER_PASSWORD: 555Nase
  zookeeper:
    image: confluentinc/cp-zookeeper:5.2.4
    hostname: zookeeper
    ports:
      - "111.111.111.111:9092:2181"
      - "192.168.0.0:9092:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
    volumes:
      - ${DOCKER_VOLUMES_PATH}/data/zookeeper/data:/data
      - ${DOCKER_VOLUMES_PATH}/data/zookeeper/datalog:/datalog
  kafka1:
    image: confluentinc/cp-kafka:5.3.1
    hostname: kafka1
    ports:
      - "111.111.111.111:9092:9091"
      - "192.168.0.0:9092:9091"
    environment:
      KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka1:19091,LISTENER_DOCKER_EXTERNAL://111.111.111.111:9091,LISTENER_DOCKER_EXTERNAL_2://192.168.0.0:9091
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL_2:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "False"
    volumes:
      - ${DOCKER_VOLUMES_PATH}/data/kafka1/data:/var/lib/kafka/data
    depends_on:
      - zookeeper
  kafka2:
    image: confluentinc/cp-kafka:5.3.1
    hostname: kafka2
    ports:
      - "111.111.111.111:9092:9092"
      - "192.168.0.0:9092:9092"
    environment:
      KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka2:19092,LISTENER_DOCKER_EXTERNAL://111.111.111.111:9092,LISTENER_DOCKER_EXTERNAL_2://192.168.0.0:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL_2:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 2
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "False"
    volumes:
      - ${DOCKER_VOLUMES_PATH}/data/kafka2/data:/var/lib/kafka/data
    depends_on:
      - zookeeper
  kafka3:
    image: confluentinc/cp-kafka:5.3.1
    hostname: kafka3
    ports:
      - "111.111.111.111:9092:9093"
      - "192.168.0.0:9092:9093"
    environment:
      KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka3:19093,LISTENER_DOCKER_EXTERNAL://111.111.111.111:9093,LISTENER_DOCKER_EXTERNAL_2://192.168.0.0:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL_2:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_BROKER_ID: 3
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "False"
    volumes:
      - ${DOCKER_VOLUMES_PATH}/data/kafka3/data:/var/lib/kafka/data
    depends_on:
      - zookeeper
  kafdrop:
    image: obsidiandynamics/kafdrop
    restart: "no"
    ports:
      - "111.111.111.111:9000:9093"
      - "192.168.0.0:9000:9093"
    environment:
      KAFKA_BROKERCONNECT: "kafka1:19091"
    depends_on:
      - kafka1
      - kafka2
      - kafka3
However, I get the following error message when starting the containers:
2024-07-24 06:24:51,139 WARN [parallel-2] o.a.k.c.ClientUtils: Couldn't resolve server kafka1:19091 from bootstrap.servers as DNS resolution failed for kafka1
2024-07-24 06:24:51,151 ERROR [parallel-2] c.p.k.u.s.StatisticsService: Failed to collect cluster local info
java.lang.IllegalStateException: Error while creating AdminClient for Cluster local
at com.provectus.kafka.ui.service.AdminClientServiceImpl.lambda$createAdminClient$5(AdminClientServiceImpl.java:56)
at reactor.core.publisher.Mono.lambda$onErrorMap$28(Mono.java:3783)
at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:94)
at reactor.core.publisher.Operators.error(Operators.java:198)
at reactor.core.publisher.FluxFlatMap.trySubscribeScalarMap(FluxFlatMap.java:135)
at reactor.core.publisher.MonoFlatMap.subscribeOrReturn(MonoFlatMap.java:53)
at reactor.core.publisher.Mono.subscribe(Mono.java:4480)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)
at reactor.core.publisher.Operators.complete(Operators.java:137)
at reactor.core.publisher.MonoEmpty.subscribe(MonoEmpty.java:46)
at reactor.core.publisher.Mono.subscribe(Mono.java:4495)
at reactor.core.publisher.FluxFlatMap$FlatMapMain.onNext(FluxFlatMap.java:427)
at reactor.core.publisher.FluxPublishOn$PublishOnSubscriber.runAsync(FluxPublishOn.java:440)
at reactor.core.publisher.FluxPublishOn$PublishOnSubscriber.run(FluxPublishOn.java:527)
at reactor.core.scheduler.WorkerTask.call(WorkerTask.java:84)
at reactor.core.scheduler.WorkerTask.call(WorkerTask.java:37)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
at java.base/java.lang.Thread.run(Thread.java:833)
Caused by: org.apache.kafka.common.KafkaException: Failed to create new KafkaAdminClient
at org.apache.kafka.clients.admin.KafkaAdminClient.createInternal(KafkaAdminClient.java:551)
at org.apache.kafka.clients.admin.KafkaAdminClient.createInternal(KafkaAdminClient.java:488)
at org.apache.kafka.clients.admin.Admin.create(Admin.java:134)
at org.apache.kafka.clients.admin.AdminClient.create(AdminClient.java:39)
at com.provectus.kafka.ui.service.AdminClientServiceImpl.lambda$createAdminClient$2(AdminClientServiceImpl.java:53)
at reactor.core.publisher.MonoSupplier.call(MonoSupplier.java:67)
at reactor.core.publisher.FluxFlatMap.trySubscribeScalarMap(FluxFlatMap.java:127)
... 16 common frames omitted
Caused by: org.apache.kafka.common.config.ConfigException: No resolvable bootstrap urls given in bootstrap.servers
at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:89)
at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:48)
at org.apache.kafka.clients.admin.KafkaAdminClient.createInternal(KafkaAdminClient.java:508)
... 22 common frames omitted
2024-07-24 06:24:51,158 DEBUG [parallel-2] c.p.k.u.s.ClustersStatisticsScheduler: Metrics updated for cluster: local
I have also tried putting all of these Docker containers on a shared custom network (roughly as sketched below) so that the service names resolve correctly, but unfortunately without success.
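That attempt looked roughly like this (a simplified sketch, service definitions trimmed; "kafka-net" is just an example name, not what the setup requires):

    networks:
      kafka-net:
        driver: bridge

    services:
      zookeeper:
        networks:
          - kafka-net
      kafka1:
        networks:
          - kafka-net
      # kafka2, kafka3, kafka-ui and kafdrop were attached to kafka-net the same way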
Can anyone help me find the correct configuration?
Thanks in advance.