0

I have a Docker Compose setup with 4 containers.

How do I run a Python script (python manage.py setup) in the server_1 container once postgres_1 is up, but only once (the state should be persisted somewhere, maybe via a volume)?

I persist PostgreSQL data to disk via volume.

Is there any nice way to do this?

I want to make setup and running of the software very easy, using just docker-compose up. It should not matter whether this is the first run or a subsequent run. The first run needs a python manage.py setup invocation.

Is there a nice way of doing it?

My idea was to check for the existence of a flag file in a mounted volume, but I don't know how to make server_1 wait for postgres_1 to be up.

Here is my docker-compose.yml

# docker-compose.yml as posted in the question, annotated with review notes.
version: '3'
services:
  server:
    build:
      context: .
      dockerfile: docker/backend/Dockerfile
    restart: always
    working_dir: /srv/scanmycode/
    entrypoint: python
    command: /srv/scanmycode/manage.py runserver
    # NOTE(review): quote port mappings ("5000:5000") — an unquoted
    # digit:digit value can parse as a sexagesimal integer under YAML 1.1.
    ports:
      - 5000:5000
    volumes:
      - ./data1:/srv/scanmycode/quantifiedcode/data/
      - ./data2:/srv/scanmycode/quantifiedcode/backend/data/
    # NOTE(review): `links` is legacy Compose; services on the default
    # network already resolve each other by name. There is also no
    # depends_on / readiness wait for postgres here — the question's
    # core problem.
    links:
      - "postgres"

  postgres:
    image: postgres:13.2
    restart: unless-stopped
    environment:
      POSTGRES_DB: qc
      POSTGRES_USER: qc
      POSTGRES_PASSWORD: qc
      PGDATA: /var/lib/postgresql/data/pgdata
    ports:
      - "5432:5432"
    # NOTE(review): host-path bind mount for the data directory; a named
    # volume avoids host permission/ownership issues (see answer below).
    volumes:
      -
        type: bind
        source: ./postgres-data
        target: /var/lib/postgresql/data
  worker_1:
    build:
      context: .
      dockerfile: docker/worker/Dockerfile
      args:
        - GIT_TOKEN
    hostname: worker_1
    restart: on-failure
    # NOTE(review): list-form depends_on only orders container *start*;
    # it does not wait for the dependency to be ready.
    depends_on:
      - rabbitmq3
    working_dir: /srv/scanmycode/
    entrypoint: python
    command: /srv/scanmycode/manage.py runworker
    volumes:
      - ./data1:/srv/scanmycode/quantifiedcode/data/
      - ./data2:/srv/scanmycode/quantifiedcode/backend/data/
    links:
      - "rabbitmq3"
      - "server"
      - "postgres"  

  rabbitmq3:
    container_name: "rabbitmq"
    image: rabbitmq:3.8-management-alpine
    environment:
      - RABBITMQ_DEFAULT_USER=qc
      - RABBITMQ_DEFAULT_PASS=qc
    ports:
      - 5672:5672
      - 15672:15672
    healthcheck:
      test: [ "CMD", "nc", "-z", "localhost", "5672" ]
      interval: 5s
      timeout: 15s
      # NOTE(review): a single retry will flag the broker unhealthy while it
      # is still starting up; several retries are usually needed.
      retries: 1
dev
  • 1,119
  • 1
  • 11
  • 34
  • 1
    use the "depends_on:" feature also for postgres to ensure that the postgres starts before. But you should also wait for postgres inside your python container. (Make retries). You can do the rest like you described. I would suggest using docker volumes instead of host path mapping to store the data/state. – Andreas Jagiella Feb 02 '22 at 09:17
  • [Docker Compose wait for container X before starting Y](https://stackoverflow.com/questions/31746182/docker-compose-wait-for-container-x-before-starting-y) has a fair amount of discussion on this topic. Do the approaches there help you? – David Maze Feb 02 '22 at 14:55

1 Answer

0

Used this:

# Requires Docker Compose v2 (the Compose Specification), which supports
# `depends_on` with `condition: service_healthy` regardless of `version:`.
version: '3'
services:
  server:
    build:
      context: .
      dockerfile: docker/backend/Dockerfile
    restart: always
    # Wait until postgres actually accepts connections, not merely until
    # its container has started.
    depends_on:
      postgres:
        condition: service_healthy
    working_dir: /srv/scanmycode/
    entrypoint: sh
    # One-time setup guarded by a flag file in the persisted ./setup_state
    # bind mount; EVERY run then starts the server. (The original command
    # ran either setup OR runserver, so the first run exited right after
    # setup and only came back up via `restart: always`.)
    command: >-
      -c "if [ ! -f /srv/scanmycode/setup_state/setup_done ]; then
      python /srv/scanmycode/manage.py setup
      && mkdir -p /srv/scanmycode/setup_state
      && touch /srv/scanmycode/setup_state/setup_done; fi
      && exec python /srv/scanmycode/manage.py runserver"
    ports:
      # Quoted: unquoted digit:digit mappings hit YAML 1.1's sexagesimal trap.
      - "5000:5000"
    volumes:
      - ./data1:/srv/scanmycode/quantifiedcode/data/
      - ./data2:/srv/scanmycode/quantifiedcode/backend/data/
      - ./setup_state:/srv/scanmycode/setup_state

  postgres:
    image: postgres:13.2
    restart: unless-stopped
    environment:
      POSTGRES_DB: qc
      POSTGRES_USER: qc
      POSTGRES_PASSWORD: qc
      PGDATA: /var/lib/postgresql/data/pgdata
    # Lets dependents wait for "ready to accept connections" rather than
    # "container started".
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U qc -d qc"]
      interval: 5s
      timeout: 5s
      retries: 10
    ports:
      - "5432:5432"
    # Named volume instead of a host bind mount: avoids host-side
    # permission/ownership problems with the postgres data directory.
    volumes:
      - db-data:/var/lib/postgresql/data

  worker_1:
    build:
      context: .
      dockerfile: docker/worker/Dockerfile
    hostname: worker_1
    restart: on-failure
    depends_on:
      rabbitmq3:
        condition: service_healthy
      postgres:
        condition: service_healthy
      server:
        condition: service_started
    working_dir: /srv/scanmycode/
    entrypoint: python
    command: /srv/scanmycode/manage.py runworker
    volumes:
      - ./data1:/srv/scanmycode/quantifiedcode/data/
      - ./data2:/srv/scanmycode/quantifiedcode/backend/data/

  rabbitmq3:
    container_name: "rabbitmq"
    image: rabbitmq:3.8-management-alpine
    environment:
      - RABBITMQ_DEFAULT_USER=qc
      - RABBITMQ_DEFAULT_PASS=qc
    ports:
      - "5672:5672"
      - "15672:15672"
    healthcheck:
      test: ["CMD", "nc", "-z", "localhost", "5672"]
      interval: 5s
      timeout: 15s
      # One retry would mark the broker unhealthy during normal startup;
      # allow several attempts before giving up.
      retries: 10

volumes:
  db-data:
    driver: local
dev
  • 1,119
  • 1
  • 11
  • 34
  • That seems like a lot of code to be in a `command:`; I'm not sure why you also need to override the `entrypoint:`. Would it make more sense to put it into a script and then make that script be the image's `CMD`? – David Maze Feb 02 '22 at 14:56