
I have a production server with 8GB RAM. I'm looking to host Elasticsearch, Logstash, and Kibana on the server using Docker Compose.

What would be the recommended Java heap / memory sizes for each of the containers, and how might I configure this?

My docker-compose file looks like the following:

---
version: '3'
services:

  kibana:
    build:
      context: kibana/
    container_name: kibana
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    networks: ['elk']
    depends_on:
      - elasticsearch
    restart: always

  elasticsearch:
    build:
      context: elasticsearch/
    container_name: elasticsearch
    networks: ['elk']
    volumes:
      - ./elastic-data:/usr/share/elasticsearch/data
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    environment:
      - cluster.name=es-docker
      - node.name=node1
      - bootstrap.memory_lock=true

  logstash:
      build:
        context: logstash/
      container_name: logstash
      volumes:
        - ./logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
      networks: ['elk']
      ports:
        - "5044:5044"
      depends_on:
        - elasticsearch
      restart: always

networks: {elk: {}}

Now, searching around the Elastic documentation, I'm seeing some settings like - "ES_JAVA_OPTS=-Xms512m -Xmx512m" etc.

So what I would like to know: for the above docker-compose, what settings should I allow for the Java heap sizes / memory limits, and how do I update the compose file to include them?

My thoughts are 4GB for Elasticsearch, 2GB for Logstash, and 1GB for Kibana, with 1GB reserved for the host.

  • If you have a lot of ingestion going on inside Logstash, 2GB might not be enough. 1GB for Kibana and the host sounds about right. That leaves you with 4GB for the ES container (of which 2GB must be allocated to the heap so that Lucene gets the remaining 2GB). That might be enough... or not... In the end, it really depends on your use case and what you want to do with the stack. The sizing should go the other way, though: first you measure how much you need for each component in order to support your use cases, and then you provision the host that fits the requirements. – Val Jul 26 '18 at 14:36
  • Can you explain what isn't clear from my comment above? Maybe I can add more details if something wasn't clear, but the main thing here is that there is not enough information on the context in order to assess whether your sizing is good or not. – Val Jul 30 '18 at 06:56
  • Your comment agrees with the sizing I have used and it is really helpful. However, it doesn't extend to how I do it in Docker Compose, i.e. what env options I need to set, e.g. ```ES_JAVA_OPTS=-Xms512m -Xmx512m```. Also, it's a comment, not an answer to the question – Robbo_UK Jul 30 '18 at 13:42
  • My comment did not intend to be an answer. My goal was to seek more information regarding your question on the recommended Java heap size. Now, supposing that the sizes are correct, what you need to know is how to specify them in docker-compose, right? – Val Jul 30 '18 at 13:47

1 Answer


Following up on our discussion in the comments above, and supposing the sizes are right, what you need to do now is size each Docker container as discussed. Note that since you're not using Swarm, you don't really need to use the v3 format; v2 is sufficient, hence I've modified the version line below. I've also added mem_limit for each container and the heap sizing in the environment sections of the elasticsearch and logstash containers.

version: '2.3'
services:

  kibana:
    build:
      context: kibana/
    container_name: kibana
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    networks: ['elk']
    depends_on:
      - elasticsearch
    restart: always
    mem_limit: 1g

  elasticsearch:
    build:
      context: elasticsearch/
    container_name: elasticsearch
    networks: ['elk']
    volumes:
      - ./elastic-data:/usr/share/elasticsearch/data
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    environment:
      - cluster.name=es-docker
      - node.name=node1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
    mem_limit: 4g

  logstash:
      build:
        context: logstash/
      container_name: logstash
      volumes:
        - ./logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
      networks: ['elk']
      ports:
        - "5044:5044"
      depends_on:
        - elasticsearch
      restart: always
      mem_limit: 2g
      environment:
        - "LS_JAVA_OPTS=-Xmx1g -Xms1g"

networks: {elk: {}}
Val
  • How come mem_limit is double the value set in the Java opts? Should they not be the same? – Robbo_UK Jul 30 '18 at 16:33
  • 3
    `mem_limit` is the amount of memory to give to the container and the `ES_JAVA_OPTS` setting is the amount of heap to give to the JVM. You should only give half the available memory to the heap. – Val Jul 30 '18 at 17:16
  • Just came across this, which also explains why the half size is needed. I thought I would add it to this answer for the next user: https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html Thanks for your help Val! – Robbo_UK Jul 31 '18 at 08:47
  • Does anybody know if the "elasticsearch heap should be half of machine memory" rule still applies when using a container's mem_limit? The OS file cache is really separate from the container; it's managed by the OS of the Docker server, right? It seems mem_limit could be the Java heap plus a bit extra for the Java code, or does mem_limit really need to be twice the Java heap? – jamshid Feb 06 '20 at 19:12
  • Unsupported config option for services.kibana: 'mem_limit' – fatemeh sadeghi Dec 17 '22 at 15:54
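
The "Unsupported config option" error above typically appears because the version: '3' Compose file format dropped mem_limit. If you need to stay on a v3-style format, a minimal sketch of an equivalent limit (assuming a Compose version that honours deploy limits outside Swarm, e.g. the newer docker compose CLI, or classic docker-compose run with --compatibility) is to move it under deploy.resources, shown here for the kibana service only:

version: '3.8'
services:

  kibana:
    build:
      context: kibana/
    container_name: kibana
    networks: ['elk']
    # v3-style replacement for "mem_limit: 1g"
    deploy:
      resources:
        limits:
          memory: 1g

networks: {elk: {}}

Otherwise, keeping the '2.3' format as in the answer above leaves mem_limit working as-is.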