I have Docker Engine configured on WSL2 under Windows 11, and I have installed a simple Kafka cluster for development and testing.
I have not installed Docker Desktop and I do not want to install it, but it seems to be the piece that brings the configuration needed to communicate with the Windows host.
I have read several articles on the subject, but it is still not clear to me what is happening.
They say that to connect the Windows host to Docker you have to use netsh to forward the port, but nothing has worked:
netsh interface portproxy add v4tov4 listenport=9092 listenaddress=0.0.0.0 connectport=9092 connectaddress=172.X.X.X
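For reference, the usual sequence (as I understand it from those articles) would be something like the following, run from an elevated PowerShell; <WSL-IP> is a placeholder for whatever wsl hostname -I reports, and the firewall rule name is just an example:

wsl hostname -I
netsh interface portproxy add v4tov4 listenport=9092 listenaddress=0.0.0.0 connectport=9092 connectaddress=<WSL-IP>
netsh advfirewall firewall add rule name="WSL Kafka 9092" dir=in action=allow protocol=TCP localport=9092
netsh interface portproxy show v4tov4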
According to the documentation, Docker creates a virtual network to which the containers are attached.
To test, I dockerized the configuration service and pointed its Spring Kafka binder at the broker like this:
spring.cloud.stream.kafka.binder.brokers=kafka-broker1:29092
That has been the only way I have managed to connect to Kafka.
So far so good, but what I want is to connect from the Windows host, which is where I have the VS Code IDE and do my development.
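For reference, from the Windows host I would expect to point the binder at the published port instead, something like the following (assuming the broker advertises a listener that is reachable from Windows as localhost:9092):

spring.cloud.stream.kafka.binder.brokers=localhost:9092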
I have even copied the code over to the WSL side to develop from there, but it does not connect either.
It is true that there is a bug in WSL (any version) that deletes the resolv.conf file, wiping out the configuration needed to reach the internet and other networks, so for now it has to be recreated by hand. This is a minor issue; I can write a script that recreates the file whenever WSL starts.
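As an aside, a common alternative to a startup script is to stop WSL from regenerating the file at all: add the following to /etc/wsl.conf inside the distro and then write /etc/resolv.conf once by hand with the nameserver you want.

[network]
generateResolvConf = false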
This is the Compose file I used for the Kafka cluster:
version: '3.9'
services:
kafka-ui:
image: provectuslabs/kafka-ui:latest
container_name: kafka-ui
networks:
- kafka_network
ports:
- "8080:8080"
restart: always
environment:
- KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper-1:2181
- KAFKA_CLUSTERS_0_NAME=kafka-local-1
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka-broker1:29092
depends_on:
- zookeeper-1
- kafka-broker1
# ZooKeeper is a centralized service for maintaining configuration information,
# naming, providing distributed synchronization, and providing group services.
# It provides distributed coordination for our Kafka cluster.
# http://zookeeper.apache.org/
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
# ZooKeeper is designed to "fail-fast", so it is important to allow it to
# restart automatically.
restart: unless-stopped
hostname: zookeeper-1
networks:
- kafka_network
container_name: zookeeper-1
# We'll expose the ZK client port so that we can connect to it from our applications.
ports:
- "2181:2181"
volumes:
- ./.zks-kfks-target/zookeeper-1/data:/var/lib/zookeeper/data
- ./.zks-kfks-target/zookeeper-1/datalog:/var/lib/zookeeper/log
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888
##################################################
##################################################
kafka-broker1:
image: confluentinc/cp-kafka:latest
hostname: kafka-broker1
container_name: kafka-broker1
networks:
- kafka_network
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "zookeeper-1:2181"
KAFKA_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka-broker1:29092,LISTENER_DOCKER_EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092
KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka-broker1:29092,LISTENER_DOCKER_EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092
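# Note: with DOCKER_HOST_IP unset, the external listener is advertised to clients as 127.0.0.1:9092.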
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
ports:
- "9092:9092"
- "29092:29092"
volumes:
- ./.zks-kfks-target/kafka-broker1/logs:/var/log/kafka
- ./.zks-kfks-target/kafka-broker1/data:/var/lib/kafka/data
depends_on:
- zookeeper-1
##################################################
# Runs commands to create the default topics
# with replication factor 1 and 2 partitions each.
##################################################
kafka-mktopics:
image: confluentinc/cp-kafka:latest
container_name: kafka_mktopics
networks:
- kafka_network
depends_on:
- zookeeper-1
- kafka-broker1
entrypoint: ['/bin/bash', '-c']
command: |
"echo Waiting for Kafka to be ready...
cub kafka-ready -b kafka-broker1:29092 1 60
echo -e 'Creating default topics'
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic testkfk
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic orders
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic order-validations
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic warehouse-inventory
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic customers
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic payments
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic platinum
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic gold
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic silver
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic bronze
echo -e 'Topics were created successfully'
kafka-topics --bootstrap-server kafka-broker1:29092 --list
"
networks:
kafka_network:
@whitespace The compose file is in the mykafka folder, so the network it creates is mykafka_kafka_network. This is the output of docker network inspect mykafka_kafka_network:
[
{
"Name": "mykafka_kafka_network",
"Id": "275189f5502e5b93cc5de0bf456ef07248a09f3d3e5931e617d36901da324b05",
"Created": "2022-08-15T14:38:55.4024562+02:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.28.0.0/16",
"Gateway": "172.28.0.1"
}
]
},
"Internal": false,
"Attachable": true,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"02558162fedd616bba22f3f83d7d5ead36dafcdad95bc981a12d04c0da86b508": {
"Name": "kafka-broker1",
"EndpointID": "3d25a76e923200b7bf46f98a2d9bf594316776a2174b5d6d11a72345cc69e4e8",
"MacAddress": "02:42:ac:1c:00:03",
"IPv4Address": "172.28.0.3/16",
"IPv6Address": ""
},
"03cf11fa279c6e751d50f7cde7ef53e57cd8c6d5e70c5da34316528f042ac064": {
"Name": "zookeeper-1",
"EndpointID": "b121b67471767c573b9817ca54286aefc54c25b172f7a01cdbe94600db42f59c",
"MacAddress": "02:42:ac:1c:00:02",
"IPv4Address": "172.28.0.2/16",
"IPv6Address": ""
},
"263d22dbdcbecd37ac37094e8b9a4c93680fd0ff2d9b9d6e24161103ffb5fb45": {
"Name": "cmak-manager",
"EndpointID": "679fee2fd3b704181e93b951655815fd78ac2be9ea6ff7faba2624c5ad2e015a",
"MacAddress": "02:42:ac:1c:00:04",
"IPv4Address": "172.28.0.4/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {
"com.docker.compose.network": "kafka_network",
"com.docker.compose.project": "mykafka",
"com.docker.compose.version": "1.29.2"
}
}
]
@OneCricketeer It doesn't work for me. :-(
I saw this video: https://www.youtube.com/watch?v=ACjlvzw4bVE. Running wsl hostname -I returns two different IPs. In PowerShell, ipconfig shows the WSL adapter as 172.19.224.1, while inside WSL, ifconfig shows eth0 as 172.19.238.212, and after a restart the addresses are different again.
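Since the eth0 address changes on every restart, one idea (sketched here, not yet verified) is to refresh the port proxy from an elevated PowerShell on each boot, assuming the first address reported by hostname -I is the eth0 one:

$wslIp = (wsl hostname -I).Trim().Split(' ')[0]
netsh interface portproxy delete v4tov4 listenport=9092 listenaddress=0.0.0.0
netsh interface portproxy add v4tov4 listenport=9092 listenaddress=0.0.0.0 connectport=9092 connectaddress=$wslIp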
This is the complete compose file I used after the correction:
version: '3.9'
services:
cmak:
image: ghcr.io/eshepelyuk/dckr/cmak-3.0.0.5:latest
restart: always
container_name: cmak-manager
networks:
kafka_network:
aliases:
- cmak
ports:
- "9000:9000"
environment:
ZK_HOSTS: "zookeeper-1:2181"
depends_on:
- zookeeper-1
# ZooKeeper is a centralized service for maintaining configuration information,
# naming, providing distributed synchronization, and providing group services.
# It provides distributed coordination for our Kafka cluster.
# http://zookeeper.apache.org/
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
# ZooKeeper is designed to "fail-fast", so it is important to allow it to
# restart automatically.
restart: unless-stopped
hostname: zookeeper-1
networks:
kafka_network:
aliases:
- zookeeper
container_name: zookeeper-1
# We'll expose the ZK client port so that we can connect to it from our applications.
ports:
- "2181:2181"
volumes:
- ./.zks-kfks-target/zookeeper-1/data:/var/lib/zookeeper/data
- ./.zks-kfks-target/zookeeper-1/datalog:/var/lib/zookeeper/log
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888
##################################################
##################################################
kafka-broker1:
image: confluentinc/cp-kafka:latest
hostname: kafka-broker1
container_name: kafka-broker1
networks:
kafka_network:
aliases:
- kafka-broker
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "zookeeper-1:2181"
KAFKA_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka-broker1:29092,LISTENER_DOCKER_EXTERNAL://0.0.0.0:9092
KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka-broker1:29092,LISTENER_DOCKER_EXTERNAL://localhost:9092
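# External clients (e.g. on the Windows host) come in through the published 9092 port and are told to use localhost:9092.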
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
ports:
- "9092:9092"
- "29092:29092"
volumes:
- ./.zks-kfks-target/kafka-broker1/logs:/var/log/kafka
- ./.zks-kfks-target/kafka-broker1/data:/var/lib/kafka/data
depends_on:
- zookeeper-1
##################################################
# Runs commands to create the default topics
# with replication factor 1 and 2 partitions each.
##################################################
kafka-mktopics:
image: confluentinc/cp-kafka:latest
container_name: kafka_mktopics
networks:
kafka_network:
aliases:
- mktopics
depends_on:
- zookeeper-1
- kafka-broker1
entrypoint: ['/bin/bash', '-c']
command: |
"echo Waiting for Kafka to be ready...
cub kafka-ready -b kafka-broker1:29092 1 60
echo -e 'Creating default topics'
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic testkfk
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic orders
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic order-validations
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic warehouse-inventory
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic customers
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic payments
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic platinum
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic gold
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic silver
kafka-topics --create --if-not-exists --bootstrap-server kafka-broker1:29092 --replication-factor 1 --partitions 2 --topic bronze
echo -e 'Topics were created successfully'
kafka-topics --bootstrap-server kafka-broker1:29092 --list
"
networks:
kafka_network:
name: argades_kafka_net
driver: bridge
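To sanity-check the corrected setup, I would expect something like the following to work: the first command (run inside WSL) lists topics through the broker's external listener, and the second (run from PowerShell) just checks that the published 9092 port is reachable from Windows, assuming WSL2 localhost forwarding is in effect.

docker exec kafka-broker1 kafka-topics --bootstrap-server localhost:9092 --list
Test-NetConnection localhost -Port 9092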