I took this:

https://raw.githubusercontent.com/tchiotludo/akhq/master/docker-compose.yml

and started changing it a bit... In the end I got a docker-compose file like this:

version: '3.8'

services:
  db:
    image: postgres:14.1-alpine
    restart: always
    hostname: postgres
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    ports:
      - '5432:5432'
    volumes:
      - db:/var/lib/postgresql/data

  akhq:
    image: tchiotludo/akhq
    environment:
      AKHQ_CONFIGURATION: |
        akhq:
          connections:
            docker-kafka-server:
              properties:
                bootstrap.servers: kafka:9092
    ports:
      - 8080:8080
    depends_on:
      - kafka

  zookeeper:
    image: confluentinc/cp-zookeeper
    container_name: zookeeper
    restart: always
    ports:
      - 2181:2181
    environment:
      ZOOKEEPER_CLIENT_PORT: '2181'
      #ZOOKEEPER_ADMIN_ENABLE_SERVER: 'false'

  kafka:
    image: confluentinc/cp-kafka
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
    environment:
      LOG_DIR: "/tmp/logs"
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092'
      #KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'

volumes:
  db:
    driver: local

When I use the YAML above, I can open http://localhost:8080/ui/docker-kafka-server/topic without any problems. But as soon as I change kafka:9092 to localhost:9092, I have problems the moment I go to http://localhost:8080/ui/docker-kafka-server/topic: the page takes far too long to load, and while it does I get warnings.
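
From what I can tell, inside the akhq container `localhost` refers to the akhq container itself, so `bootstrap.servers: localhost:9092` points at nothing there, and `advertised.listeners = localhost:9092` hands the same unusable address back to any client that does reach the broker. A commonly suggested layout uses one listener for other containers and one for the host. This is only a sketch: the INTERNAL/EXTERNAL listener names and port 29092 are illustrative choices, not from my original file:

kafka:
  image: confluentinc/cp-kafka
  environment:
    KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
    # container-side listener on 29092, host-side listener on 9092;
    # listener names and the extra port are illustrative
    KAFKA_LISTENERS: 'INTERNAL://0.0.0.0:29092,EXTERNAL://0.0.0.0:9092'
    KAFKA_ADVERTISED_LISTENERS: 'INTERNAL://kafka:29092,EXTERNAL://localhost:9092'
    KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT'
    KAFKA_INTER_BROKER_LISTENER_NAME: 'INTERNAL'
  ports:
    - "9092:9092"

With that layout, containers like AKHQ would keep using `kafka:29092` while host clients use `localhost:9092`.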

Here is the docker-compose.yml file that causes the problems:

version: '3.8'

services:
  db:
    image: postgres:14.1-alpine
    restart: always
    hostname: postgres
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    ports:
      - '5432:5432'
    volumes:
      - db:/var/lib/postgresql/data

  akhq:
    image: tchiotludo/akhq
    environment:
      AKHQ_CONFIGURATION: |
        akhq:
          connections:
            docker-kafka-server:
              properties:
                bootstrap.servers: localhost:9092
    ports:
      - 8080:8080
    depends_on:
      - kafka

  zookeeper:
    image: confluentinc/cp-zookeeper
    container_name: zookeeper
    restart: always
    ports:
      - 2181:2181
    environment:
      ZOOKEEPER_CLIENT_PORT: '2181'
      #ZOOKEEPER_ADMIN_ENABLE_SERVER: 'false'

  kafka:
    image: confluentinc/cp-kafka
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
    environment:
      LOG_DIR: "/tmp/logs"
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092'
      #KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'

volumes:
  db:
    driver: local

[screenshot of the warnings]

Broker properties logged with the problematic docker-compose.yml file:

kafka       |   advertised.listeners = PLAINTEXT://localhost:9092
kafka       |   alter.config.policy.class.name = null
kafka       |   alter.log.dirs.replication.quota.window.num = 11
kafka       |   alter.log.dirs.replication.quota.window.size.seconds = 1
kafka       |   authorizer.class.name =
kafka       |   auto.create.topics.enable = true
kafka       |   auto.leader.rebalance.enable = true
kafka       |   background.threads = 10
kafka       |   broker.heartbeat.interval.ms = 2000
kafka       |   broker.id = -1
kafka       |   broker.id.generation.enable = true
kafka       |   broker.rack = null
kafka       |   broker.session.timeout.ms = 9000
kafka       |   client.quota.callback.class = null
kafka       |   compression.type = producer
kafka       |   connection.failed.authentication.delay.ms = 100
kafka       |   connections.max.idle.ms = 600000
kafka       |   connections.max.reauth.ms = 0
kafka       |   control.plane.listener.name = null
kafka       |   controlled.shutdown.enable = true
kafka       |   controlled.shutdown.max.retries = 3
kafka       |   controlled.shutdown.retry.backoff.ms = 5000
kafka       |   controller.listener.names = null
kafka       |   controller.quorum.append.linger.ms = 25
kafka       |   controller.quorum.election.backoff.max.ms = 1000
kafka       |   controller.quorum.election.timeout.ms = 1000
kafka       |   controller.quorum.fetch.timeout.ms = 2000
kafka       |   controller.quorum.request.timeout.ms = 2000
kafka       |   controller.quorum.retry.backoff.ms = 20
kafka       |   controller.quorum.voters = []
kafka       |   controller.quota.window.num = 11
kafka       |   controller.quota.window.size.seconds = 1
kafka       |   controller.socket.timeout.ms = 30000
kafka       |   create.topic.policy.class.name = null
kafka       |   default.replication.factor = 1
kafka       |   delegation.token.expiry.check.interval.ms = 3600000
kafka       |   delegation.token.expiry.time.ms = 86400000
kafka       |   delegation.token.master.key = null
kafka       |   delegation.token.max.lifetime.ms = 604800000
kafka       |   delegation.token.secret.key = null
kafka       |   delete.records.purgatory.purge.interval.requests = 1
kafka       |   delete.topic.enable = true
kafka       |   early.start.listeners = null
kafka       |   fetch.max.bytes = 57671680
kafka       |   fetch.purgatory.purge.interval.requests = 1000
kafka       |   group.initial.rebalance.delay.ms = 3000
kafka       |   group.max.session.timeout.ms = 1800000
kafka       |   group.max.size = 2147483647
kafka       |   group.min.session.timeout.ms = 6000
kafka       |   initial.broker.registration.timeout.ms = 60000
kafka       |   inter.broker.listener.name = null
kafka       |   inter.broker.protocol.version = 3.3-IV3
kafka       |   kafka.metrics.polling.interval.secs = 10
kafka       |   kafka.metrics.reporters = []
kafka       |   leader.imbalance.check.interval.seconds = 300
kafka       |   leader.imbalance.per.broker.percentage = 10
kafka       |   listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
kafka       |   listeners = PLAINTEXT://0.0.0.0:9092
kafka       |   log.cleaner.backoff.ms = 15000
kafka       |   log.cleaner.dedupe.buffer.size = 134217728
kafka       |   log.cleaner.delete.retention.ms = 86400000
kafka       |   log.cleaner.enable = true
kafka       |   log.cleaner.io.buffer.load.factor = 0.9
kafka       |   log.cleaner.io.buffer.size = 524288
kafka       |   log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
kafka       |   log.cleaner.max.compaction.lag.ms = 9223372036854775807
kafka       |   log.cleaner.min.cleanable.ratio = 0.5
kafka       |   log.cleaner.min.compaction.lag.ms = 0
kafka       |   log.cleaner.threads = 1
kafka       |   log.cleanup.policy = [delete]
kafka       |   log.dir = /tmp/kafka-logs
kafka       |   log.dirs = /var/lib/kafka/data
kafka       |   log.flush.interval.messages = 9223372036854775807
kafka       |   log.flush.interval.ms = null
kafka       |   log.flush.offset.checkpoint.interval.ms = 60000
kafka       |   log.flush.scheduler.interval.ms = 9223372036854775807
kafka       |   log.flush.start.offset.checkpoint.interval.ms = 60000
kafka       |   log.index.interval.bytes = 4096
kafka       |   log.index.size.max.bytes = 10485760
kafka       |   log.message.downconversion.enable = true
kafka       |   log.message.format.version = 3.0-IV1
kafka       |   log.message.timestamp.difference.max.ms = 9223372036854775807
kafka       |   log.message.timestamp.type = CreateTime
kafka       |   log.preallocate = false
kafka       |   log.retention.bytes = -1
kafka       |   log.retention.check.interval.ms = 300000
kafka       |   log.retention.hours = 168
kafka       |   log.retention.minutes = null
kafka       |   log.retention.ms = null
kafka       |   log.roll.hours = 168
kafka       |   log.roll.jitter.hours = 0
kafka       |   log.roll.jitter.ms = null
kafka       |   log.roll.ms = null
kafka       |   log.segment.bytes = 1073741824
kafka       |   log.segment.delete.delay.ms = 60000
kafka       |   max.connection.creation.rate = 2147483647
kafka       |   max.connections = 2147483647
kafka       |   max.connections.per.ip = 2147483647
kafka       |   max.connections.per.ip.overrides =
kafka       |   max.incremental.fetch.session.cache.slots = 1000
kafka       |   message.max.bytes = 1048588
kafka       |   metadata.log.dir = null
kafka       |   metadata.log.max.record.bytes.between.snapshots = 20971520
kafka       |   metadata.log.segment.bytes = 1073741824
kafka       |   metadata.log.segment.min.bytes = 8388608
kafka       |   metadata.log.segment.ms = 604800000
kafka       |   metadata.max.idle.interval.ms = 500
kafka       |   metadata.max.retention.bytes = -1
kafka       |   metadata.max.retention.ms = 604800000
kafka       |   metric.reporters = []
kafka       |   metrics.num.samples = 2
kafka       |   metrics.recording.level = INFO
kafka       |   metrics.sample.window.ms = 30000
kafka       |   min.insync.replicas = 1
kafka       |   node.id = -1
kafka       |   num.io.threads = 8
kafka       |   num.network.threads = 3
kafka       |   num.partitions = 1
kafka       |   num.recovery.threads.per.data.dir = 1
kafka       |   num.replica.alter.log.dirs.threads = null
kafka       |   num.replica.fetchers = 1
kafka       |   offset.metadata.max.bytes = 4096
kafka       |   offsets.commit.required.acks = -1
kafka       |   offsets.commit.timeout.ms = 5000
kafka       |   offsets.load.buffer.size = 5242880
kafka       |   offsets.retention.check.interval.ms = 600000
kafka       |   offsets.retention.minutes = 10080
kafka       |   offsets.topic.compression.codec = 0
kafka       |   offsets.topic.num.partitions = 50
kafka       |   offsets.topic.replication.factor = 3
kafka       |   offsets.topic.segment.bytes = 104857600
kafka       |   password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
kafka       |   password.encoder.iterations = 4096
kafka       |   password.encoder.key.length = 128
kafka       |   password.encoder.keyfactory.algorithm = null
kafka       |   password.encoder.old.secret = null
kafka       |   password.encoder.secret = null
kafka       |   principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
kafka       |   process.roles = []
kafka       |   producer.purgatory.purge.interval.requests = 1000
kafka       |   queued.max.request.bytes = -1
kafka       |   queued.max.requests = 500
kafka       |   quota.window.num = 11
kafka       |   quota.window.size.seconds = 1
kafka       |   remote.log.index.file.cache.total.size.bytes = 1073741824
kafka       |   remote.log.manager.task.interval.ms = 30000
kafka       |   remote.log.manager.task.retry.backoff.max.ms = 30000
kafka       |   remote.log.manager.task.retry.backoff.ms = 500
kafka       |   remote.log.manager.task.retry.jitter = 0.2
kafka       |   remote.log.manager.thread.pool.size = 10
kafka       |   remote.log.metadata.manager.class.name = null
kafka       |   remote.log.metadata.manager.class.path = null
kafka       |   remote.log.metadata.manager.impl.prefix = null
kafka       |   remote.log.metadata.manager.listener.name = null
kafka       |   remote.log.reader.max.pending.tasks = 100
kafka       |   remote.log.reader.threads = 10
kafka       |   remote.log.storage.manager.class.name = null
kafka       |   remote.log.storage.manager.class.path = null
kafka       |   remote.log.storage.manager.impl.prefix = null
kafka       |   remote.log.storage.system.enable = false
kafka       |   replica.fetch.backoff.ms = 1000
kafka       |   replica.fetch.max.bytes = 1048576
kafka       |   replica.fetch.min.bytes = 1
kafka       |   replica.fetch.response.max.bytes = 10485760
kafka       |   replica.fetch.wait.max.ms = 500
kafka       |   replica.high.watermark.checkpoint.interval.ms = 5000
kafka       |   replica.lag.time.max.ms = 30000
kafka       |   replica.selector.class = null
kafka       |   replica.socket.receive.buffer.bytes = 65536
kafka       |   replica.socket.timeout.ms = 30000
kafka       |   replication.quota.window.num = 11
kafka       |   replication.quota.window.size.seconds = 1
kafka       |   request.timeout.ms = 30000
kafka       |   reserved.broker.max.id = 1000
kafka       |   sasl.client.callback.handler.class = null
kafka       |   sasl.enabled.mechanisms = [GSSAPI]
kafka       |   sasl.jaas.config = null
kafka       |   sasl.kerberos.kinit.cmd = /usr/bin/kinit
kafka       |   sasl.kerberos.min.time.before.relogin = 60000
kafka       |   sasl.kerberos.principal.to.local.rules = [DEFAULT]
kafka       |   sasl.kerberos.service.name = null
kafka       |   sasl.kerberos.ticket.renew.jitter = 0.05
kafka       |   sasl.kerberos.ticket.renew.window.factor = 0.8
kafka       |   sasl.login.callback.handler.class = null
kafka       |   sasl.login.class = null
kafka       |   sasl.login.connect.timeout.ms = null
kafka       |   sasl.login.read.timeout.ms = null
kafka       |   sasl.login.refresh.buffer.seconds = 300
kafka       |   sasl.login.refresh.min.period.seconds = 60
kafka       |   sasl.login.refresh.window.factor = 0.8
kafka       |   sasl.login.refresh.window.jitter = 0.05
kafka       |   sasl.login.retry.backoff.max.ms = 10000
kafka       |   sasl.login.retry.backoff.ms = 100
kafka       |   sasl.mechanism.controller.protocol = GSSAPI
kafka       |   sasl.mechanism.inter.broker.protocol = GSSAPI
kafka       |   sasl.oauthbearer.clock.skew.seconds = 30
kafka       |   sasl.oauthbearer.expected.audience = null
kafka       |   sasl.oauthbearer.expected.issuer = null
kafka       |   sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
kafka       |   sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
kafka       |   sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
kafka       |   sasl.oauthbearer.jwks.endpoint.url = null
kafka       |   sasl.oauthbearer.scope.claim.name = scope
kafka       |   sasl.oauthbearer.sub.claim.name = sub
kafka       |   sasl.oauthbearer.token.endpoint.url = null
kafka       |   sasl.server.callback.handler.class = null
kafka       |   sasl.server.max.receive.size = 524288
kafka       |   security.inter.broker.protocol = PLAINTEXT
kafka       |   security.providers = null
kafka       |   socket.connection.setup.timeout.max.ms = 30000
kafka       |   socket.connection.setup.timeout.ms = 10000
kafka       |   socket.listen.backlog.size = 50
kafka       |   socket.receive.buffer.bytes = 102400
kafka       |   socket.request.max.bytes = 104857600
kafka       |   socket.send.buffer.bytes = 102400
kafka       |   ssl.cipher.suites = []
kafka       |   ssl.client.auth = none
kafka       |   ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
kafka       |   ssl.endpoint.identification.algorithm = https
kafka       |   ssl.engine.factory.class = null
kafka       |   ssl.key.password = null
kafka       |   ssl.keymanager.algorithm = SunX509
kafka       |   ssl.keystore.certificate.chain = null
kafka       |   ssl.keystore.key = null
kafka       |   ssl.keystore.location = null
kafka       |   ssl.keystore.password = null
kafka       |   ssl.keystore.type = JKS
kafka       |   ssl.principal.mapping.rules = DEFAULT
kafka       |   ssl.protocol = TLSv1.3
kafka       |   ssl.provider = null
kafka       |   ssl.secure.random.implementation = null
kafka       |   ssl.trustmanager.algorithm = PKIX
kafka       |   ssl.truststore.certificates = null
kafka       |   ssl.truststore.location = null
kafka       |   ssl.truststore.password = null
kafka       |   ssl.truststore.type = JKS
kafka       |   transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
kafka       |   transaction.max.timeout.ms = 900000
kafka       |   transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
kafka       |   transaction.state.log.load.buffer.size = 5242880
kafka       |   transaction.state.log.min.isr = 2
kafka       |   transaction.state.log.num.partitions = 50
kafka       |   transaction.state.log.replication.factor = 3
kafka       |   transaction.state.log.segment.bytes = 104857600
kafka       |   transactional.id.expiration.ms = 604800000
kafka       |   unclean.leader.election.enable = false
kafka       |   zookeeper.clientCnxnSocket = null
kafka       |   zookeeper.connect = zookeeper:2181
kafka       |   zookeeper.connection.timeout.ms = null
kafka       |   zookeeper.max.in.flight.requests = 10
kafka       |   zookeeper.session.timeout.ms = 18000
kafka       |   zookeeper.set.acl = false
kafka       |   zookeeper.ssl.cipher.suites = null
kafka       |   zookeeper.ssl.client.enable = false
kafka       |   zookeeper.ssl.crl.enable = false
kafka       |   zookeeper.ssl.enabled.protocols = null
kafka       |   zookeeper.ssl.endpoint.identification.algorithm = HTTPS
kafka       |   zookeeper.ssl.keystore.location = null
kafka       |   zookeeper.ssl.keystore.password = null
kafka       |   zookeeper.ssl.keystore.type = null
kafka       |   zookeeper.ssl.ocsp.enable = false
kafka       |   zookeeper.ssl.protocol = TLSv1.2
kafka       |   zookeeper.ssl.truststore.location = null
kafka       |   zookeeper.ssl.truststore.password = null
kafka       |   zookeeper.ssl.truststore.type = null
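
The dump does show `listeners = PLAINTEXT://0.0.0.0:9092` but `advertised.listeners = PLAINTEXT://localhost:9092`, which would explain it: from inside the akhq container, `localhost:9092` is not the broker. Under the two-listener sketch above, the AKHQ connection would keep pointing at the container-side address (again, `kafka:29092` is my illustrative choice):

akhq:
  image: tchiotludo/akhq
  environment:
    AKHQ_CONFIGURATION: |
      akhq:
        connections:
          docker-kafka-server:
            properties:
              # container-side listener from the sketch above
              bootstrap.servers: kafka:29092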

It seems I'm doing something wrong... I need your help/advice :(

Disteonne
  • I found the discussion https://github.com/tchiotludo/akhq/issues/1085 and made some changes according to the last answer there. It helped me – Disteonne Apr 24 '23 at 21:01
  • You never needed to change `kafka:9092` to anything else. Explain why you thought that was correct? – OneCricketeer Apr 25 '23 at 19:04

0 Answers