I have a client, Kafka, and ZooKeeper on the same Docker network, and I am trying to connect from the client to Kafka using SERVICE_NAME:PORT, but I get the following error:
driver-service-container | 2022-07-24 09:00:05.076 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] Connection to node 1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
I know that containers on the same network can normally communicate with each other using the service name, but I don't understand why it doesn't work here.
The client trying to communicate with Kafka is driver-service.
I looked through these resources, and according to them my approach should work:
driver-service GitHub repository
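For context, this is roughly how the bootstrap address from the environment ends up in the Kafka admin client on the driver-service side; the class and bean names below are a simplified sketch, not the exact code from the repository:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaAdmin;

@Configuration
public class KafkaAdminConfig {

    // Expected to resolve to broker:29092 through KAFKA_BOOTSTRAP_SERVERS from docker-compose
    @Value("${kafka.bootstrap.servers}")
    private String bootstrapServers;

    @Bean
    public KafkaAdmin kafkaAdmin() {
        // KafkaAdmin opens the AdminClient ("adminclient-1" in the log) against these servers
        Map<String, Object> config = new HashMap<>();
        config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        return new KafkaAdmin(config);
    }
}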
My docker-compose file:
version: '3'
services:
gateway-server:
image: gateway-server-image
container_name: gateway-server-container
ports:
- '5555:5555'
environment:
- SECURE_KEY_USERNAME=randomSecureKeyUsername!
- SECURE_KEY_PASSWORD=randomSecureKeyPassword!
- PASSENGER_SERVICE_URL=172.24.2.4:4444
- DRIVER_SERVICE_URL=172.24.2.5:3333
networks:
microservicesNetwork:
ipv4_address: 172.24.2.6
driver-service:
image: driver-service-image
container_name: driver-service-container
ports:
- '3333:3333'
environment:
- NOTIFICATION_SERVICE_URL=172.24.2.3:8888
- PAYMENT_SERVICE_URL=172.24.2.2:7777
- SECURE_KEY_USERNAME=randomSecureKeyUsername!
- SECURE_KEY_PASSWORD=randomSecureKeyPassword!
- KAFKA_GROUP_ID=driver-group-id
- KAFKA_BOOTSTRAP_SERVERS=broker:29092
- kafka.consumer.group.id=driver-group-id
- kafka.consumer.enable.auto.commit=true
- kafka.consumer.auto.commit.interval.ms=1000
- kafka.consumer.auto.offset.reset=earliest
- kafka.consumer.max.poll.records=1
networks:
microservicesNetwork:
ipv4_address: 172.24.2.5
passenger-service:
image: passenger-service-image
container_name: passenger-service-container
ports:
- '4444:4444'
environment:
- PAYMENT_SERVICE_URL=172.24.2.2:7777
- SECURE_KEY_USERNAME=randomSecureKeyUsername!
- SECURE_KEY_PASSWORD=randomSecureKeyPassword!
networks:
microservicesNetwork:
ipv4_address: 172.24.2.4
notification-service:
image: notification-service-image
container_name: notification-service-container
ports:
- '8888:8888'
environment:
- SECURE_KEY_USERNAME=randomSecureKeyUsername!
- SECURE_KEY_PASSWORD=randomSecureKeyPassword!
networks:
microservicesNetwork:
ipv4_address: 172.24.2.3
payment-service:
image: payment-service-image
container_name: payment-service-container
ports:
- '7777:7777'
environment:
- SECURE_KEY_USERNAME=randomSecureKeyUsername!
- SECURE_KEY_PASSWORD=randomSecureKeyPassword!
networks:
microservicesNetwork:
ipv4_address: 172.24.2.2
zookeeper:
image: confluentinc/cp-zookeeper:7.0.1
container_name: zookeeper
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
networks:
- microservicesNetwork
broker:
image: confluentinc/cp-kafka:7.0.1
container_name: broker
ports:
- "9092:9092"
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092,PLAINTEXT_INTERNAL://broker:29092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
GROUP_ID: driver-group-id
KAFKA_CREATE_TOPICS: "product"
networks:
- microservicesNetwork
kafka-ui:
image: provectuslabs/kafka-ui
container_name: kafka-ui
ports:
- "8080:8080"
restart: always
environment:
- KAFKA_CLUSTERS_0_NAME=broker
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=broker:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181
- KAFKA_CLUSTERS_0_READONLY=true
networks:
- microservicesNetwork
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
platform: linux/x86_64
environment:
- discovery.type=single-node
- max_open_files=65536
- max_content_length_in_bytes=100000000
- transport.host=elasticsearch
volumes:
- $HOME/app:/var/app
ports:
- "9200:9200"
- "9300:9300"
networks:
- microservicesNetwork
postgresql:
image: postgres:11.1-alpine
platform: linux/x86_64
container_name: postgresql
volumes:
- ./postgresql/:/var/lib/postgresql/data/
environment:
- POSTGRES_PASSWORD=123456
- POSTGRES_USER=postgres
- POSTGRES_DB=cqrs_db
ports:
- "5432:5432"
networks:
- microservicesNetwork
networks:
microservicesNetwork:
driver: bridge
ipam:
driver: default
config:
- subnet: 172.24.2.0/16
gateway: 172.24.2.1
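As a quick sanity check of the networking itself, a throwaway probe like this can be run from a container attached to microservicesNetwork; it uses the same broker:29092 address that driver-service is given (assumes the kafka-clients library is on the classpath):

import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class BrokerProbe {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Same value as KAFKA_BOOTSTRAP_SERVERS: compose service name + internal listener port
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:29092");
        props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "5000");

        try (AdminClient admin = AdminClient.create(props)) {
            // listTopics() forces a metadata round trip, so it fails fast if the broker is unreachable
            Set<String> topics = admin.listTopics().names().get();
            System.out.println("Connected, topics: " + topics);
        }
    }
}

From inside the network this resolves broker via Docker's embedded DNS, which is exactly the behaviour I expect from driver-service as well.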
My application.prod.properties:
#datasource
spring.datasource.url=jdbc:h2:mem:db_driver
spring.datasource.username=root
spring.datasource.password=1234
spring.datasource.driver-class-name=org.h2.Driver
spring.jpa.database-platform=org.hibernate.dialect.H2Dialect
#need spring-security config.
spring.h2.console.enabled=false
spring.h2.console.path=/h2-console
spring.jpa.show-sql=true
service.security.secure-key-username=${SECURE_KEY_USERNAME}
service.security.secure-key-password=${SECURE_KEY_PASSWORD}
payment.service.url=${PAYMENT_SERVICE_URL}
notification.service.url=${NOTIFICATION_SERVICE_URL}
#kafka configs
kafka.bootstrap.servers=${KAFKA_BOOTSTRAP_SERVERS}
kafka.group.id=${KAFKA_GROUP_ID}
spring.cache.cache-names=driver
spring.jackson.serialization.fail-on-empty-beans=false
spring.http.multipart.max-file-size=10MB
spring.http.multipart.max-request-size=11MB
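For completeness, the kafka.consumer.* environment variables from docker-compose translate into plain consumer settings roughly like this (the String deserializers are an assumption; everything else mirrors the compose file and the properties above):

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class DriverConsumerFactory {

    public static KafkaConsumer<String, String> create() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:29092");    // KAFKA_BOOTSTRAP_SERVERS
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "driver-group-id");          // KAFKA_GROUP_ID
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");           // kafka.consumer.enable.auto.commit
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");      // kafka.consumer.auto.commit.interval.ms
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");        // kafka.consumer.auto.offset.reset
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");                // kafka.consumer.max.poll.records
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return new KafkaConsumer<>(props);
    }
}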